Dataset schema (one row per column: name, dtype, value summary):

    column           dtype           values
    library          stringclasses   1 value
    test_file        stringclasses   785 values
    test_function    stringlengths   1 to 295
    before           stringlengths   0 to 448k
    after            stringlengths   0 to 487k
    context_before   stringclasses   947 values
    context_after    stringlengths   0 to 16.3k
    commit_before    stringclasses   1 value
    commit_after     stringclasses   1 value
    change_type      stringclasses   3 values
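For orientation, a minimal sketch of reading rows with this schema, assuming the rows are published as a Hugging Face dataset; "user/this-dataset" is a hypothetical identifier, not the real name:

    from collections import Counter
    from datasets import load_dataset  # standard Hugging Face datasets API

    ds = load_dataset("user/this-dataset", split="train")  # hypothetical dataset id

    # Tally the three change_type values declared in the schema.
    print(Counter(row["change_type"] for row in ds))

    # Collect the deleted nvFuser tests from test_jit_cuda_fuser.py (the records below).
    deleted = [r for r in ds
               if r["change_type"] == "deleted" and r["test_file"] == "test/test_jit_cuda_fuser.py"]
    print(len(deleted), deleted[0]["test_function"])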
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: test_binary_ops_channels_last_with_bcast
before:
    def test_binary_ops_channels_last_with_bcast(self):
        device = "cuda"
        x = torch.randn([4, 3, 2, 5], device=device).to(memory_format=torch.channels_last)
        w = torch.randn([2, 5], device=device)

        def t(x: torch.Tensor, b: torch.Tensor):
            o = x + b
            return torch.relu(o)

        t_jit = torch.jit.script(t)
        jit_o = t_jit(x, w)
        jit_o = t_jit(x, w)
        jit_o = t_jit(x, w)
        o = t(x, w)
        self.assertEqual(o.dtype, jit_o.dtype)
        self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-4))
        self.assertGraphContains(t_jit.graph_for(x, w), FUSION_GUARD)
context_before:
    import contextlib
    import unittest
    import os
    import random
    import enum
    import copy
    from functools import reduce
    import operator
    import warnings

    import torch
    from torch.nn import functional
    from torch.profiler import profile, ProfilerActivity

    from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
    from torch.testing._internal.common_cuda import TEST_MULTIGPU
    from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
    from torch.testing._internal.common_jit import JitCommonTestCase
    from torch.testing._internal.common_methods_invocations import op_db, SampleInput
    from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
        is_iterable_of_tensors, freeze_rng_state, skipIfRocm
    from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
    from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
    from torch.testing import FileCheck

    from jit.test_fuser_common import TestFuserCommon  # noqa: F401

    import itertools
    import numpy as np
    import math

    from torch.autograd.gradcheck import gradcheck

    from typing import List

    RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
    CUDA_MAJOR, CUDA_MINOR = 0, 0

    os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
    os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
    os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

    FUSION_GROUP = 'prim::CudaFusionGroup'
    FUSION_GUARD = 'prim::CudaFusionGuard'

    ALIAS_TEST_DISABLED = True
    TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
    TEST_LARGE_TENSOR = RUN_NVFUSER

    class TestCudaFuser(JitTestCase):
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
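The record above exercises a binary op where a 2-D tensor broadcasts against a channels-last 4-D tensor. For background, a minimal eager-mode sketch of the same pattern (plain PyTorch API, no TorchScript or nvFuser, runnable on CPU); the memory-format claim in the comment is the eager behavior the deleted test compared the fuser against:

    import torch

    x = torch.randn(4, 3, 2, 5).to(memory_format=torch.channels_last)
    w = torch.randn(2, 5)  # broadcasts over the two leading dimensions
    o = torch.relu(x + w)

    # Eager mode generally carries the 4-D operand's memory format through,
    # so the result should still be channels-last.
    print(o.is_contiguous(memory_format=torch.channels_last))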
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: t
before:
    def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float):
        o_16 = torch.add(x, y)
        o_32_a = torch.add(y, z, alpha=alpha)
        o_32_b = torch.add(o_16, z)
        return (o_16, o_32_a, o_32_b)

    t_jit = torch.jit.script(t)
    alpha = 0.5
    # stick to integers, this avoids the numerical difference due to our
    # promotion
    x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
    y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
    z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
    jit_o = t_jit(x, y, z, alpha)
    jit_o = t_jit(x, y, z, alpha)
    o = t(x, y, z, alpha)
    for oo, jit_oo in zip(o, jit_o):
        self.assertEqual(oo.dtype, jit_oo.dtype)
        self.assertEqual(oo, jit_oo)
    self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
context_before: identical to the preamble in the first record, minus the trailing `class TestCudaFuser(JitTestCase):` line (it ends at `TEST_LARGE_TENSOR = RUN_NVFUSER`).
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: __init__
before:
    def __init__(self):
        self.old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
        self.old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(False)
        self.old_guard = torch._C._jit_set_nvfuser_guard_mode(False)
        torch._C._debug_set_autodiff_subgraph_inlining(False)
        self.old_value = torch._C._jit_set_autocast_mode(True)
        if RUN_CUDA:
            self.old_nvfuser = torch._C._jit_set_nvfuser_enabled(True)
context_before: same preamble as the first record, but ending with `class CudaFuserTestOptions():` instead of `class TestCudaFuser(JitTestCase):`.
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
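The `__init__` above saves the prior fuser flags, but the matching teardown is not part of this record. A plausible sketch of the restore path, assuming the same private `torch._C` hooks; this is an illustration, not the class's actual method:

    # Hypothetical restore(), mirroring the state saved in __init__ above.
    def restore(self):
        torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuse)
        torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuse)
        torch._C._jit_set_nvfuser_guard_mode(self.old_guard)
        torch._C._jit_set_autocast_mode(self.old_value)
        if RUN_CUDA:
            torch._C._jit_set_nvfuser_enabled(self.old_nvfuser)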
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: forward
before:
    def forward(self, x: torch.Tensor, y: torch.Tensor):
        o = torch.add(x, y)
        o = torch.sum(o, dim=self.reduction_axis, keepdim=self.keepdim)
        return o
context_before: same preamble as the first record, but ending with `class MyReduction(torch.nn.Module):`.
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: test_reduction
before:
    def test_reduction(self):
        for x in ([7, 8, 12], [12, 8, 7, 9, 15], [128, 16, 8, 32]):
            # note that num_dim is exclusive from len(x), so we are not reducing
            # to single element (codegen limitation at this moment)
            for num_reduce_dim in range(1, len(x)):
                for axes in itertools.combinations(range(len(x)), num_reduce_dim):
                    for keepdim in (True, False):
                        perm0 = range(len(x))
                        perm1 = range(len(x))
                        self._reduction_helper(x, axes, torch.float32, "cuda", perm0, perm1, keepdim)
context_before: identical to the preamble in the first record (including the trailing `class TestCudaFuser(JitTestCase):` line).
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
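For concreteness, the axes enumeration driving the loop above: `itertools.combinations` yields every way to pick the reduction dimensions, e.g. for a 3-D shape and two reduced dims:

    import itertools
    print(list(itertools.combinations(range(3), 2)))  # [(0, 1), (0, 2), (1, 2)]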
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: t_wb
before:
    def t_wb(shapes: List[int], x, w, b, eps: float, cudnn: bool):
        o = torch.layer_norm(x, shapes, w, b, eps, cudnn)
        o = torch.relu(o)
        return o
context_before: identical to the preamble in the first record, minus the trailing `class TestCudaFuser(JitTestCase):` line.
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
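The `t_wb` record above, and the `t_w` and `t_b` records that follow, call `torch.layer_norm` with different combinations of weight and bias. A minimal eager-mode call matching that signature (standard `torch.layer_norm`; shapes chosen arbitrarily for illustration):

    import torch

    x = torch.randn(4, 4, 2)
    w = torch.randn(4, 2)
    b = torch.randn(4, 2)
    # positional args: input, normalized_shape, weight, bias, eps, cudnn_enable
    o = torch.layer_norm(x, [4, 2], w, b, 1e-5, False)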
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: t_w
before:
    def t_w(shapes: List[int], x, w, eps: float, cudnn: bool):
        o = torch.layer_norm(x, shapes, w, None, eps, cudnn)
        o = torch.relu(o)
        return o
context_before: identical to the preamble in the first record, minus the trailing `class TestCudaFuser(JitTestCase):` line.
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: t_b
before:
    def t_b(shapes: List[int], x, b, eps: float, cudnn: bool):
        o = torch.layer_norm(x, shapes, None, b, eps, cudnn)
        o = torch.relu(o)
        return o
context_before: identical to the preamble in the first record, minus the trailing `class TestCudaFuser(JitTestCase):` line.
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
(Duplicate record: a field-for-field repeat of the `t` record above, same commits and change_type.)
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: rsub
before:
    def rsub(x: torch.Tensor, y: torch.Tensor):
        o = torch.rsub(x, y)
        o = o * 2.
        return o

    rsub_jit = torch.jit.script(rsub)
    self._run_helper(rsub_jit, rsub, x, y)
context_before: identical to the preamble in the first record, minus the trailing `class TestCudaFuser(JitTestCase):` line.
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: add
before:
    def add(x: torch.Tensor, other: torch.Tensor, alpha: float):
        o = torch.relu(x)
        o = torch.add(o, other=other, alpha=alpha)
        return o

    add_jit = torch.jit.script(add)
    self._run_helper(add_jit, add, x, y, 2.0)
context_before: identical to the preamble in the first record, minus the trailing `class TestCudaFuser(JitTestCase):` line.
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: clamp0
before:
    def clamp0(x: torch.Tensor, f: int):
        o = 2. * torch.clamp(x, min=f)
        return o

    clamp0_jit = torch.jit.script(clamp0)
    self._run_helper(clamp0_jit, clamp0, x, arg2)
context_before: identical to the preamble in the first record, minus the trailing `class TestCudaFuser(JitTestCase):` line.
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: clamp1
before:
    def clamp1(x: torch.Tensor, f: int, ff: int):
        o = 2. * torch.clamp(x, min=f, max=ff)
        return o

    clamp1_jit = torch.jit.script(clamp1)
    self._run_helper(clamp1_jit, clamp1, x, arg2, arg3)
context_before: identical to the preamble in the first record, minus the trailing `class TestCudaFuser(JitTestCase):` line.
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: threshold
before:
    def threshold(x: torch.Tensor, th: int, val: int):
        o = 2. * torch.threshold(x, th, val)
        return o

    threshold_jit = torch.jit.script(threshold)
    self._run_helper(threshold_jit, threshold, x, arg2, arg3)
context_before: identical to the preamble in the first record, minus the trailing `class TestCudaFuser(JitTestCase):` line.
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: where
before:
    def where(x: torch.Tensor, y: torch.Tensor, cond: torch.Tensor):
        o = 2. * torch.where(cond, x, y)
        return o

    where_jit = torch.jit.script(where)
    self._run_helper(where_jit, where, x, y, cond)
context_before: identical to the preamble in the first record, minus the trailing `class TestCudaFuser(JitTestCase):` line.
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: lerp
before:
    def lerp(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
        o = 2. * torch.lerp(x, y, z)
        return o

    lerp_jit = torch.jit.script(lerp)
    self._run_helper(lerp_jit, lerp, x, y, z)
context_before: identical to the preamble in the first record, minus the trailing `class TestCudaFuser(JitTestCase):` line.
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: lerp_scale
before:
    def lerp_scale(x: torch.Tensor, y: torch.Tensor, z: float):
        o = 2. * torch.lerp(x, y, z)
        return o

    lerp_scale_jit = torch.jit.script(lerp_scale)
    self._run_helper(lerp_scale_jit, lerp_scale, x, y, 0.5)
context_before: identical to the preamble in the first record, minus the trailing `class TestCudaFuser(JitTestCase):` line.
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: test_layer_norm_parser
before:
    def test_layer_norm_parser(self):
        dtype = torch.float32
        device = "cuda"
        x = torch.randn([4, 4, 2], dtype=dtype, device=device)
        w = torch.randn([4, 2], dtype=dtype, device=device)
        b = torch.randn([4, 2], dtype=dtype, device=device)

        def t(x: torch.Tensor, w: torch.Tensor, b: torch.Tensor):
            o = torch.relu(x)
            o = torch.layer_norm(o, [4, 2], w, b, 1e-5)
            return o

        o = t(x, w, b)
        t_jit = torch.jit.script(t)
        jit_o = t_jit(x, w, b)
        jit_o = t_jit(x, w, b)
        o = t(x, w, b)
        self.assertGraphContains(t_jit.graph_for(x, w, b), FUSION_GUARD)
context_before: identical to the preamble in the first record (including the trailing `class TestCudaFuser(JitTestCase):` line).
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
(Three duplicate records follow here in the source: field-for-field repeats of the `t`, `__init__`, and `forward` records above, same commits and change_type.)
library: torch
test_file: test/test_jit_cuda_fuser.py
test_function: test_native_layer_norm_bfloat
before:
    def test_native_layer_norm_bfloat(self):
        dims = 4
        rnds = 3
        for idx in range(rnds):
            for offset in range(1, dims):
                input_shape = [random.randint(10, 30) for idx in range(dims)]
                norm_shape = [input_shape[idx] for idx in range(dims - offset, dims)]
                self._native_layer_norm_helper(input_shape, norm_shape, torch.bfloat16, "cuda", 1e-1)
context_before: identical to the preamble in the first record (including the trailing `class TestCudaFuser(JitTestCase):` line).
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted
(Two more duplicate records: each a field-for-field repeat of the `forward` record above.)
torch
test/test_jit_cuda_fuser.py
t
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float): o_16 = torch.add(x, y) o_32_a = torch.add(y, z, alpha=alpha) o_32_b = torch.add(o_16, z) return (o_16, o_32_a, o_32_b) t_jit = torch.jit.script(t) alpha = 0.5 # stick to integers, this avoid the numerical difference due to our # promotion x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") jit_o = t_jit(x, y, z, alpha) jit_o = t_jit(x, y, z, alpha) o = t(x, y, z, alpha) for oo, jit_oo in zip(o, jit_o): self.assertEqual(oo.dtype, jit_oo.dtype) self.assertEqual(oo, jit_oo) self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
import contextlib import unittest import os import random import enum import copy from functools import reduce import operator import warnings import torch from torch.nn import functional from torch.profiler import profile, ProfilerActivity from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes from torch.testing._internal.common_jit import JitCommonTestCase from torch.testing._internal.common_methods_invocations import op_db, SampleInput from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \ is_iterable_of_tensors, freeze_rng_state, skipIfRocm from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn from torch.testing import FileCheck from jit.test_fuser_common import TestFuserCommon # noqa: F401 import itertools import numpy as np import math from torch.autograd.gradcheck import gradcheck from typing import List RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM CUDA_MAJOR, CUDA_MINOR = 0, 0 os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE'] os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE'] os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0' FUSION_GROUP = 'prim::CudaFusionGroup' FUSION_GUARD = 'prim::CudaFusionGuard' ALIAS_TEST_DISABLED = True TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported() TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
__init__
def __init__(self):
    # save the current fuser/executor flags so they can be restored later
    self.old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
    self.old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
    torch._C._jit_override_can_fuse_on_cpu(False)
    torch._C._jit_override_can_fuse_on_gpu(False)
    self.old_guard = torch._C._jit_set_nvfuser_guard_mode(False)
    torch._C._debug_set_autodiff_subgraph_inlining(False)
    self.old_value = torch._C._jit_set_autocast_mode(True)
    if RUN_CUDA:
        self.old_nvfuser = torch._C._jit_set_nvfuser_enabled(True)
class CudaFuserTestOptions():
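The record saves each global flag's previous value so it can be restored when the test options are torn down. A hypothetical context-manager rendering of the same save/override/restore pattern, using the same private torch._C hooks as above, might look like:

import contextlib
import torch

@contextlib.contextmanager
def override_gpu_fusion(enabled: bool):
    # remember the current flag, override it, and restore it on exit
    old = torch._C._jit_can_fuse_on_gpu()
    torch._C._jit_override_can_fuse_on_gpu(enabled)
    try:
        yield
    finally:
        torch._C._jit_override_can_fuse_on_gpu(old)

with override_gpu_fusion(False):
    pass  # run code with the legacy GPU fuser disabled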
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
forward
def forward(self, x: torch.Tensor, y: torch.Tensor):
    o = torch.add(x, y)
    o = torch.sum(o, dim=self.reduction_axis, keepdim=self.keepdim)
    return o
class MyReduction(torch.nn.Module):
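reduction_axis and keepdim are attributes set elsewhere in the module's __init__. A self-contained sketch of the same add-then-reduce module, with hypothetical constructor arguments, could be:

import torch

class SumReduction(torch.nn.Module):
    # hypothetical stand-in mirroring MyReduction's add-then-sum pattern
    def __init__(self, reduction_axis: int, keepdim: bool):
        super().__init__()
        self.reduction_axis = reduction_axis
        self.keepdim = keepdim

    def forward(self, x: torch.Tensor, y: torch.Tensor):
        o = torch.add(x, y)
        return torch.sum(o, dim=self.reduction_axis, keepdim=self.keepdim)

m = torch.jit.script(SumReduction(reduction_axis=1, keepdim=False))
x = torch.randn(4, 8, 16, device="cuda")
y = torch.randn(4, 8, 16, device="cuda")
out = m(x, y)  # shape (4, 16) after reducing dim 1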
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
t_wb
def t_wb(shapes: List[int], x, w, b, eps: float, cudnn: bool):
    o = torch.layer_norm(x, shapes, w, b, eps, cudnn)
    o = torch.relu(o)
    return o
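torch.layer_norm here is the functional form with positional arguments (input, normalized_shape, weight, bias, eps, cudnn_enable). A short usage sketch with illustrative shapes:

import torch
from typing import List

x = torch.randn(8, 16, 32, device="cuda")
shapes: List[int] = [32]           # normalize over the last dimension
w = torch.randn(32, device="cuda")  # weight for the normalized dimension
b = torch.randn(32, device="cuda")  # bias for the normalized dimension
o = torch.layer_norm(x, shapes, w, b, 1e-5, False)
o = torch.relu(o)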
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
test_norm_bfloat
def test_norm_bfloat(self):
    output_elements = 10000
    channel_sizes = [67, 457, 1024, 4096]
    with torch.backends.cudnn.flags(enabled=False):
        # TODO instance norm on ROCm was giving ~50% incorrect results
        for is_batch_norm_else_instance_norm in [True] if TEST_WITH_ROCM else [False, True]:
            for dims in range(3, 6):
                output_size = int(pow(output_elements, 1. / (dims - 1)))
                for C in channel_sizes:
                    x = [output_size for idx in range(dims)]
                    x[1] = C
                    self._norm_helper(x, torch.bfloat16, "cuda", 1e-1, is_batch_norm_else_instance_norm)
class TestCudaFuser(JitTestCase):
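The shape arithmetic in the loop picks output_size so the per-sample element count stays near output_elements regardless of rank, then overwrites the channel dimension. Worked out for one iteration (values illustrative):

import math

output_elements = 10000
dims = 4
C = 457
output_size = int(math.pow(output_elements, 1.0 / (dims - 1)))  # 10000 ** (1/3) -> 21
shape = [output_size] * dims
shape[1] = C   # channel dimension
print(shape)   # [21, 457, 21, 21]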
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
fn
def fn(x: int):
    y = torch.zeros((3, 4, x, x + 2)).cuda()
    for i in range(2):
        inp = torch.rand((3, 4, x, x + i)).cuda()
        weight = torch.rand((x + 2, x + i)).cuda()
        bias = torch.rand((x, x + 2)).cuda()
        y += torch.sin(torch.nn.functional.linear(inp, weight, bias))
    return y

fn_s = torch.jit.script(fn)
fn_s(5)
fn_s(5)
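The shapes in fn are chosen so torch.nn.functional.linear sees a different in_features on each loop iteration while the output shape stays fixed; the 2-D bias broadcasts against the matmul result exactly as in the record above. A sketch of one iteration's shape arithmetic, with x = 5 as an illustrative value:

import torch

x, i = 5, 1
inp = torch.rand(3, 4, x, x + i)    # last dim is in_features = 6
weight = torch.rand(x + 2, x + i)   # (out_features=7, in_features=6)
bias = torch.rand(x, x + 2)         # (5, 7), broadcast over the result
out = torch.nn.functional.linear(inp, weight, bias)
print(out.shape)  # torch.Size([3, 4, 5, 7])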
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
test1
def test1(x: torch.Tensor, y: torch.Tensor):
    o = torch.add(x, y)
    o = torch.add(o, y)
    o = torch.add(o, y)
    o = torch.add(o, y)
    o = o + 1.0
    return o

test1_jit = torch.jit.script(test1)
for i in range(3):
    jit_o = test1_jit(x, y)
    jit_o.backward(grad)
bwd_graph = list(
    list(test1_jit.get_debug_state().execution_plans.values())[
        0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(x.grad.dtype, x.dtype)
self.assertEqual(y.grad.dtype, y.dtype)
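The get_debug_state() chain digs the differentiated backward graph out of the graph executor so FileCheck can assert that a fusion group was created during autodiff. The FileCheck idiom itself, in isolation, looks like this (the graph string is illustrative):

from torch.testing import FileCheck

graph_str = """
graph(%x : Tensor):
  %y : Tensor = prim::CudaFusionGroup_0(%x)
  return (%y)
"""
# .check(...) queues a substring assertion; .run(...) executes them in order
# against the given string (or graph) and raises if any check fails.
FileCheck().check("prim::CudaFusionGroup").run(graph_str)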
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
t
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float): o_16 = torch.add(x, y) o_32_a = torch.add(y, z, alpha=alpha) o_32_b = torch.add(o_16, z) return (o_16, o_32_a, o_32_b) t_jit = torch.jit.script(t) alpha = 0.5 # stick to integers, this avoid the numerical difference due to our # promotion x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") jit_o = t_jit(x, y, z, alpha) jit_o = t_jit(x, y, z, alpha) o = t(x, y, z, alpha) for oo, jit_oo in zip(o, jit_o): self.assertEqual(oo.dtype, jit_oo.dtype) self.assertEqual(oo, jit_oo) self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
import contextlib import unittest import os import random import enum import copy from functools import reduce import operator import warnings import torch from torch.nn import functional from torch.profiler import profile, ProfilerActivity from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes from torch.testing._internal.common_jit import JitCommonTestCase from torch.testing._internal.common_methods_invocations import op_db, SampleInput from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \ is_iterable_of_tensors, freeze_rng_state, skipIfRocm from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn from torch.testing import FileCheck from jit.test_fuser_common import TestFuserCommon # noqa: F401 import itertools import numpy as np import math from torch.autograd.gradcheck import gradcheck from typing import List RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM CUDA_MAJOR, CUDA_MINOR = 0, 0 os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE'] os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE'] os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0' FUSION_GROUP = 'prim::CudaFusionGroup' FUSION_GUARD = 'prim::CudaFusionGuard' ALIAS_TEST_DISABLED = True TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported() TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
t2
def t2(x: torch.Tensor, p: float, train: bool):
    o = torch.nn.functional.softmax(x, dim=-1)
    o = torch.nn.functional.dropout(o, p, training=train)
    return o

# disable function caching so new inputs generate new graphs
t.__disable_jit_function_caching__ = True
t2.__disable_jit_function_caching__ = True

for fn in [t, t2]:
    for m_format in [torch.contiguous_format, torch.channels_last]:
        fn_jit = torch.jit.script(fn)
        x = torch.randn(sizes, dtype=dtype, device=device, requires_grad=True).to(memory_format=m_format)
        grads = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=m_format)
        # the drop probability must be set to zero, since eager mode and
        # the JIT pick random numbers in a different order
        self._run_training_helper(fn_jit, fn, grads, x, 0.0, True)
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math

from torch.autograd.gradcheck import gradcheck

from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'

ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
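Note: _run_training_helper above is an internal method of the test class, and its body is not part of this row. A rough standalone equivalent of such a forward/backward comparison (a sketch under that assumption; sizes, dtype, and device are bound explicitly here because the row leaves them to the enclosing test, and a CUDA device is assumed):

import torch

def t2(x: torch.Tensor, p: float, train: bool):
    o = torch.nn.functional.softmax(x, dim=-1)
    o = torch.nn.functional.dropout(o, p, training=train)
    return o

sizes, dtype, device = (8, 4, 16), torch.float32, "cuda"
t2_jit = torch.jit.script(t2)

x = torch.randn(sizes, dtype=dtype, device=device, requires_grad=True)
grads = torch.randn(sizes, dtype=dtype, device=device)

# p=0.0 makes dropout a no-op, so eager and JIT see the same "randomness"
for _ in range(3):  # repeated runs let both forward and backward graphs specialize
    x.grad = None
    jit_o = t2_jit(x, 0.0, True)
    jit_o.backward(grads)
jit_grad = x.grad.clone()

x.grad = None
o = t2(x, 0.0, True)
o.backward(grads)

assert torch.allclose(o, jit_o)
assert torch.allclose(x.grad, jit_grad)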
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
t
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float): o_16 = torch.add(x, y) o_32_a = torch.add(y, z, alpha=alpha) o_32_b = torch.add(o_16, z) return (o_16, o_32_a, o_32_b) t_jit = torch.jit.script(t) alpha = 0.5 # stick to integers, this avoid the numerical difference due to our # promotion x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") jit_o = t_jit(x, y, z, alpha) jit_o = t_jit(x, y, z, alpha) o = t(x, y, z, alpha) for oo, jit_oo in zip(o, jit_o): self.assertEqual(oo.dtype, jit_oo.dtype) self.assertEqual(oo, jit_oo) self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
import contextlib import unittest import os import random import enum import copy from functools import reduce import operator import warnings import torch from torch.nn import functional from torch.profiler import profile, ProfilerActivity from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes from torch.testing._internal.common_jit import JitCommonTestCase from torch.testing._internal.common_methods_invocations import op_db, SampleInput from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \ is_iterable_of_tensors, freeze_rng_state, skipIfRocm from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn from torch.testing import FileCheck from jit.test_fuser_common import TestFuserCommon # noqa: F401 import itertools import numpy as np import math from torch.autograd.gradcheck import gradcheck from typing import List RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM CUDA_MAJOR, CUDA_MINOR = 0, 0 os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE'] os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE'] os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0' FUSION_GROUP = 'prim::CudaFusionGroup' FUSION_GUARD = 'prim::CudaFusionGuard' ALIAS_TEST_DISABLED = True TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported() TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
t
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float): o_16 = torch.add(x, y) o_32_a = torch.add(y, z, alpha=alpha) o_32_b = torch.add(o_16, z) return (o_16, o_32_a, o_32_b) t_jit = torch.jit.script(t) alpha = 0.5 # stick to integers, this avoid the numerical difference due to our # promotion x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") jit_o = t_jit(x, y, z, alpha) jit_o = t_jit(x, y, z, alpha) o = t(x, y, z, alpha) for oo, jit_oo in zip(o, jit_o): self.assertEqual(oo.dtype, jit_oo.dtype) self.assertEqual(oo, jit_oo) self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
import contextlib import unittest import os import random import enum import copy from functools import reduce import operator import warnings import torch from torch.nn import functional from torch.profiler import profile, ProfilerActivity from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes from torch.testing._internal.common_jit import JitCommonTestCase from torch.testing._internal.common_methods_invocations import op_db, SampleInput from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \ is_iterable_of_tensors, freeze_rng_state, skipIfRocm from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn from torch.testing import FileCheck from jit.test_fuser_common import TestFuserCommon # noqa: F401 import itertools import numpy as np import math from torch.autograd.gradcheck import gradcheck from typing import List RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM CUDA_MAJOR, CUDA_MINOR = 0, 0 os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE'] os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE'] os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0' FUSION_GROUP = 'prim::CudaFusionGroup' FUSION_GUARD = 'prim::CudaFusionGuard' ALIAS_TEST_DISABLED = True TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported() TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
t
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float): o_16 = torch.add(x, y) o_32_a = torch.add(y, z, alpha=alpha) o_32_b = torch.add(o_16, z) return (o_16, o_32_a, o_32_b) t_jit = torch.jit.script(t) alpha = 0.5 # stick to integers, this avoid the numerical difference due to our # promotion x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") jit_o = t_jit(x, y, z, alpha) jit_o = t_jit(x, y, z, alpha) o = t(x, y, z, alpha) for oo, jit_oo in zip(o, jit_o): self.assertEqual(oo.dtype, jit_oo.dtype) self.assertEqual(oo, jit_oo) self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
import contextlib import unittest import os import random import enum import copy from functools import reduce import operator import warnings import torch from torch.nn import functional from torch.profiler import profile, ProfilerActivity from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes from torch.testing._internal.common_jit import JitCommonTestCase from torch.testing._internal.common_methods_invocations import op_db, SampleInput from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \ is_iterable_of_tensors, freeze_rng_state, skipIfRocm from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn from torch.testing import FileCheck from jit.test_fuser_common import TestFuserCommon # noqa: F401 import itertools import numpy as np import math from torch.autograd.gradcheck import gradcheck from typing import List RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM CUDA_MAJOR, CUDA_MINOR = 0, 0 os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE'] os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE'] os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0' FUSION_GROUP = 'prim::CudaFusionGroup' FUSION_GUARD = 'prim::CudaFusionGuard' ALIAS_TEST_DISABLED = True TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported() TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
t
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float): o_16 = torch.add(x, y) o_32_a = torch.add(y, z, alpha=alpha) o_32_b = torch.add(o_16, z) return (o_16, o_32_a, o_32_b) t_jit = torch.jit.script(t) alpha = 0.5 # stick to integers, this avoid the numerical difference due to our # promotion x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") jit_o = t_jit(x, y, z, alpha) jit_o = t_jit(x, y, z, alpha) o = t(x, y, z, alpha) for oo, jit_oo in zip(o, jit_o): self.assertEqual(oo.dtype, jit_oo.dtype) self.assertEqual(oo, jit_oo) self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
import contextlib import unittest import os import random import enum import copy from functools import reduce import operator import warnings import torch from torch.nn import functional from torch.profiler import profile, ProfilerActivity from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes from torch.testing._internal.common_jit import JitCommonTestCase from torch.testing._internal.common_methods_invocations import op_db, SampleInput from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \ is_iterable_of_tensors, freeze_rng_state, skipIfRocm from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn from torch.testing import FileCheck from jit.test_fuser_common import TestFuserCommon # noqa: F401 import itertools import numpy as np import math from torch.autograd.gradcheck import gradcheck from typing import List RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM CUDA_MAJOR, CUDA_MINOR = 0, 0 os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE'] os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE'] os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0' FUSION_GROUP = 'prim::CudaFusionGroup' FUSION_GUARD = 'prim::CudaFusionGuard' ALIAS_TEST_DISABLED = True TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported() TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
t
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float): o_16 = torch.add(x, y) o_32_a = torch.add(y, z, alpha=alpha) o_32_b = torch.add(o_16, z) return (o_16, o_32_a, o_32_b) t_jit = torch.jit.script(t) alpha = 0.5 # stick to integers, this avoid the numerical difference due to our # promotion x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") jit_o = t_jit(x, y, z, alpha) jit_o = t_jit(x, y, z, alpha) o = t(x, y, z, alpha) for oo, jit_oo in zip(o, jit_o): self.assertEqual(oo.dtype, jit_oo.dtype) self.assertEqual(oo, jit_oo) self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
import contextlib import unittest import os import random import enum import copy from functools import reduce import operator import warnings import torch from torch.nn import functional from torch.profiler import profile, ProfilerActivity from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes from torch.testing._internal.common_jit import JitCommonTestCase from torch.testing._internal.common_methods_invocations import op_db, SampleInput from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \ is_iterable_of_tensors, freeze_rng_state, skipIfRocm from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn from torch.testing import FileCheck from jit.test_fuser_common import TestFuserCommon # noqa: F401 import itertools import numpy as np import math from torch.autograd.gradcheck import gradcheck from typing import List RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM CUDA_MAJOR, CUDA_MINOR = 0, 0 os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE'] os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE'] os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0' FUSION_GROUP = 'prim::CudaFusionGroup' FUSION_GUARD = 'prim::CudaFusionGuard' ALIAS_TEST_DISABLED = True TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported() TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
t
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float): o_16 = torch.add(x, y) o_32_a = torch.add(y, z, alpha=alpha) o_32_b = torch.add(o_16, z) return (o_16, o_32_a, o_32_b) t_jit = torch.jit.script(t) alpha = 0.5 # stick to integers, this avoid the numerical difference due to our # promotion x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") jit_o = t_jit(x, y, z, alpha) jit_o = t_jit(x, y, z, alpha) o = t(x, y, z, alpha) for oo, jit_oo in zip(o, jit_o): self.assertEqual(oo.dtype, jit_oo.dtype) self.assertEqual(oo, jit_oo) self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
import contextlib import unittest import os import random import enum import copy from functools import reduce import operator import warnings import torch from torch.nn import functional from torch.profiler import profile, ProfilerActivity from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes from torch.testing._internal.common_jit import JitCommonTestCase from torch.testing._internal.common_methods_invocations import op_db, SampleInput from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \ is_iterable_of_tensors, freeze_rng_state, skipIfRocm from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn from torch.testing import FileCheck from jit.test_fuser_common import TestFuserCommon # noqa: F401 import itertools import numpy as np import math from torch.autograd.gradcheck import gradcheck from typing import List RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM CUDA_MAJOR, CUDA_MINOR = 0, 0 os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE'] os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE'] os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0' FUSION_GROUP = 'prim::CudaFusionGroup' FUSION_GUARD = 'prim::CudaFusionGuard' ALIAS_TEST_DISABLED = True TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported() TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
t
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float): o_16 = torch.add(x, y) o_32_a = torch.add(y, z, alpha=alpha) o_32_b = torch.add(o_16, z) return (o_16, o_32_a, o_32_b) t_jit = torch.jit.script(t) alpha = 0.5 # stick to integers, this avoid the numerical difference due to our # promotion x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") jit_o = t_jit(x, y, z, alpha) jit_o = t_jit(x, y, z, alpha) o = t(x, y, z, alpha) for oo, jit_oo in zip(o, jit_o): self.assertEqual(oo.dtype, jit_oo.dtype) self.assertEqual(oo, jit_oo) self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
import contextlib import unittest import os import random import enum import copy from functools import reduce import operator import warnings import torch from torch.nn import functional from torch.profiler import profile, ProfilerActivity from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes from torch.testing._internal.common_jit import JitCommonTestCase from torch.testing._internal.common_methods_invocations import op_db, SampleInput from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \ is_iterable_of_tensors, freeze_rng_state, skipIfRocm from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn from torch.testing import FileCheck from jit.test_fuser_common import TestFuserCommon # noqa: F401 import itertools import numpy as np import math from torch.autograd.gradcheck import gradcheck from typing import List RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM CUDA_MAJOR, CUDA_MINOR = 0, 0 os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE'] os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE'] os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0' FUSION_GROUP = 'prim::CudaFusionGroup' FUSION_GUARD = 'prim::CudaFusionGuard' ALIAS_TEST_DISABLED = True TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported() TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
t
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float): o_16 = torch.add(x, y) o_32_a = torch.add(y, z, alpha=alpha) o_32_b = torch.add(o_16, z) return (o_16, o_32_a, o_32_b) t_jit = torch.jit.script(t) alpha = 0.5 # stick to integers, this avoid the numerical difference due to our # promotion x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda") jit_o = t_jit(x, y, z, alpha) jit_o = t_jit(x, y, z, alpha) o = t(x, y, z, alpha) for oo, jit_oo in zip(o, jit_o): self.assertEqual(oo.dtype, jit_oo.dtype) self.assertEqual(oo, jit_oo) self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
import contextlib import unittest import os import random import enum import copy from functools import reduce import operator import warnings import torch from torch.nn import functional from torch.profiler import profile, ProfilerActivity from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes from torch.testing._internal.common_jit import JitCommonTestCase from torch.testing._internal.common_methods_invocations import op_db, SampleInput from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \ is_iterable_of_tensors, freeze_rng_state, skipIfRocm from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn from torch.testing import FileCheck from jit.test_fuser_common import TestFuserCommon # noqa: F401 import itertools import numpy as np import math from torch.autograd.gradcheck import gradcheck from typing import List RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM CUDA_MAJOR, CUDA_MINOR = 0, 0 os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE'] os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE'] os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0' FUSION_GROUP = 'prim::CudaFusionGroup' FUSION_GUARD = 'prim::CudaFusionGuard' ALIAS_TEST_DISABLED = True TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported() TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
test_graph_rng
def test_graph_rng(self):
    self.assertTrue(torch._C._jit_nvfuser_enabled())
    size = 10000
    a = torch.randn((size,), device="cuda", dtype=torch.float)

    def t(x):
        o = x + 1.0
        o = torch.nn.functional.dropout(o, p=0.1)
        o = o + 1.0
        o = torch.nn.functional.dropout(o, p=0.1)
        return o

    t_jit = torch.jit.script(t)

    for _ in range(3):
        t_jit(a)

    self.assertGraphContainsExactly(t_jit.graph_for(a), FUSION_GUARD, 1)

    # Control (jitted, ungraphed)
    torch.cuda.manual_seed(5)
    eager_out = a.clone()
    for _ in range(3):
        eager_out = t_jit(eager_out)

    graph_in = a.clone()
    g = torch.cuda.CUDAGraph()
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        torch.cuda.manual_seed(5)
        g.capture_begin()
        graph_out = t_jit(graph_in)
        g.capture_end()
    torch.cuda.current_stream().wait_stream(s)
    # g is now a jitted, graphed version of t.

    # Runs a (jitted, graphed) -> (jitted, ungraphed) -> (jitted, graphed) sequence.
    # The ops in the overall sequence should be the same as Control.
    g.replay()
    # graph_out is now filled with g's result. Use it as ungraphed input.
    out = t_jit(graph_out)
    graph_in.copy_(out)
    g.replay()

    # If replay() updated RNG state correctly, graph_out should now equal eager_out
    self.assertEqual(graph_out, eager_out)
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER

class TestCudaFuser(JitTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
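For reference: the record above exercises PyTorch's CUDA Graphs capture/replay API through TorchScript. A minimal, self-contained sketch of the same capture pattern, assuming nothing from the test harness (the function f, tensor names, and sizes below are illustrative, not part of the original test):

import torch

def f(x):
    # Any CUDA work with stable shapes can be captured.
    return torch.nn.functional.relu(x + 1.0)

static_in = torch.randn(1024, device="cuda")

# Warm up on a side stream so one-time initialization is not recorded.
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
    for _ in range(3):
        f(static_in)
torch.cuda.current_stream().wait_stream(s)

g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
    static_out = f(static_in)  # kernels are recorded, not executed

# Each replay reruns the captured kernels against the same static tensors.
static_in.copy_(torch.randn(1024, device="cuda"))
g.replay()  # static_out now holds f() of the new input

Warming up on a side stream before capture is the documented recipe; it is the same stream discipline the deleted test follows around capture_begin()/capture_end().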
torch
test/test_jit_cuda_fuser.py
__init__
def __init__(self):
    self.old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
    self.old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
    torch._C._jit_override_can_fuse_on_cpu(False)
    torch._C._jit_override_can_fuse_on_gpu(False)
    self.old_guard = torch._C._jit_set_nvfuser_guard_mode(False)
    torch._C._debug_set_autodiff_subgraph_inlining(False)
    self.old_value = torch._C._jit_set_autocast_mode(True)
    if RUN_CUDA:
        self.old_nvfuser = torch._C._jit_set_nvfuser_enabled(True)
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER

class CudaFuserTestOptions():
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
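The options object above saves the previous value of every global flag it overrides (each torch._C setter returns the old value). The matching teardown is not part of this record; a sketch of what restoring those flags would look like, using only the setters already shown above (the method name restore is hypothetical):

def restore(self):
    # Hand each saved flag back to the JIT; illustrative counterpart
    # to the save-and-override pattern in __init__ above.
    torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuse)
    torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuse)
    torch._C._jit_set_nvfuser_guard_mode(self.old_guard)
    torch._C._jit_set_autocast_mode(self.old_value)
    if RUN_CUDA:
        torch._C._jit_set_nvfuser_enabled(self.old_nvfuser)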
torch
test/test_jit_cuda_fuser.py
forward
def forward(self, x: torch.Tensor, y: torch.Tensor):
    o = torch.add(x, y)
    o = torch.sum(o, dim=self.reduction_axis, keepdim=self.keepdim)
    return o
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER

class MyReduction(torch.nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
shifted_softplus
def shifted_softplus(x: torch.Tensor, shift: float):
    return functional.softplus(x) - shift

jitted = torch.jit.script(shifted_softplus)
inp = torch.randn(4, 2, dtype=torch.float32, device="cuda").requires_grad_()
inp_ref = inp.detach().clone().requires_grad_()
grad = torch.randn(4, 2, dtype=torch.float32, device="cuda")

aten_o = shifted_softplus(inp_ref, 0.693147)
aten_o.backward(grad)
aten_grad = inp_ref.grad

for i in range(3):
    jit_o = jitted(inp, 0.693147)
    inp.grad = None  # avoid accumulation on grad
    jit_o.backward(grad)
    jit_grad = inp.grad

assert torch.allclose(jit_o, aten_o)
assert torch.allclose(jit_grad, aten_grad)
self.assertGraphContains(jitted.graph_for(inp, 0.693147), FUSION_GROUP, True)
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
repro
def repro(x: torch.Tensor, alpha: float):
    o = torch.rand_like(x)
    o = torch.add(o, alpha)
    return o

repro_jit = torch.jit.script(repro)
self._run_helper(repro_jit, repro, x, 0.6)
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
t_not_fused
def t_not_fused(x: torch.Tensor, w: torch.Tensor):
    o = torch.nn.functional.conv2d(x, w)
    return o.relu()

jitted_not_fused = torch.jit.script(t_not_fused)

for i in range(3):
    jit_o = jitted_not_fused(inp, weight)

graph = jitted_not_fused.graph_for(inp)
self.assertGraphContainsExactly(graph, FUSION_GROUP, 0)
self.assertGraphContains(graph, 'aten::relu', True)
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
t_bias
def t_bias(x: torch.Tensor, w: torch.Tensor, bias: torch.Tensor):
    o = torch.nn.functional.conv2d(x, w, bias)
    return o.relu()

jitted_bias = torch.jit.script(t_bias)

for i in range(3):
    jit_o = jitted_bias(inp, weight, bias)

graph = jitted_bias.graph_for(inp)
self.assertGraphContains(graph, FUSION_GROUP, True)
self.assertGraphContains(graph, 'prim::add_optional', True)
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
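The conv_decomposition entry in the PYTORCH_NVFUSER_ENABLE setting above is what splits the bias addition out of conv2d, so the bias add and relu become pointwise ops the fuser can combine (the prim::add_optional node this test checks for). A rough eager-mode picture of that split, with illustrative shapes rather than the fuser's actual lowering:

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8, 8, device="cuda")
w = torch.randn(4, 3, 3, 3, device="cuda")
b = torch.randn(4, device="cuda")

# Reference: conv2d applies the bias internally.
ref = F.conv2d(x, w, b).relu()

# Decomposed: bias-free conv, then a broadcast add and relu.
# The add and relu are the pointwise epilogue a fuser can merge into one kernel.
out = F.conv2d(x, w) + b.view(1, -1, 1, 1)
out = out.relu()

assert torch.allclose(ref, out, atol=1e-6)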
torch
test/test_jit_cuda_fuser.py
test_fix_shape_expression_bn
def test_fix_shape_expression_bn(self):
    class MyModule(torch.nn.Module):
        def __init__(self, num_features=4):
            super().__init__()
            self.bn = torch.nn.BatchNorm2d(num_features)

        def forward(self, x, y):
            out1 = self.bn(x)
            out2 = out1 + y
            out3 = torch.relu(out2)
            return out3

    t = MyModule(4).float().cuda()
    jitted = torch.jit.script(t)
    x = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
    y = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
    with torch.cuda.amp.autocast(True):
        for i in range(5):
            jit_o = jitted(x, y)
        jit_o = jitted(x, y)
        o = t(x, y)
        self.assertTrue(torch.allclose(jit_o, o))
        graph = jitted.graph_for(x, y)
        self.assertGraphContains(graph, FUSION_GROUP, True)
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER

class TestCudaFuser(JitTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
test_nested_view
def test_nested_view(self):
    self._ltc_helper([256, 128, 1024], torch.float, 'cuda', 1e-6)
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER

class TestCudaFuser(JitTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
_bias_squeeze_relu_helper
def _bias_squeeze_relu_helper(self, shape, dtype, device, error):
    class BiasSqueezeRelu(torch.nn.Module):
        def forward(self, inputs: torch.Tensor, bias: torch.Tensor):
            o = inputs + bias
            o = torch.squeeze(o)
            return torch.relu(o)

    t = BiasSqueezeRelu()
    x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    t_jit = torch.jit.script(t)
    jit_o = t_jit(x, bias)
    jit_o = t_jit(x, bias)
    jit_o = t_jit(x, bias)
    o = t(x, bias)
    self.assertEqual(o.dtype, jit_o.dtype)
    self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
    graph = t_jit.graph_for(x, bias)
    self.assertGraphContains(graph, FUSION_GUARD)
    self.assertGraphContains(graph, 'prim::squeeze_copy', True)
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math

from torch.autograd.gradcheck import gradcheck

from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'

ALIAS_TEST_DISABLED = True

TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER

class TestCudaFuser(JitTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
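The helper above is parameterized over shape, dtype, device, and tolerance. A hypothetical call site; the concrete argument values below are illustrative assumptions, not values taken from this record:

# Illustrative arguments only -- the actual parameterizations are not recorded here.
self._bias_squeeze_relu_helper([1, 6, 1, 2], torch.float32, "cuda", 1e-6)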
torch
test/test_jit_cuda_fuser.py
forward
def forward(self, x: torch.Tensor, y: torch.Tensor):
    o = torch.add(x, y)
    o = torch.sum(o, dim=self.reduction_axis, keepdim=self.keepdim)
    return o
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math

from torch.autograd.gradcheck import gradcheck

from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'

ALIAS_TEST_DISABLED = True

TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER

class MyReduction(torch.nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
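This `forward` reads `self.reduction_axis` and `self.keepdim`, which are set elsewhere in `MyReduction`. A minimal sketch of the enclosing module, assuming a constructor that simply stores both attributes (the real constructor is not part of this record):

from typing import List

import torch

class MyReduction(torch.nn.Module):
    # Sketch: the constructor signature is inferred from the two
    # attributes the forward method reads.
    def __init__(self, reduction_axis: List[int], keepdim: bool = False):
        super().__init__()
        self.reduction_axis = reduction_axis
        self.keepdim = keepdim

    def forward(self, x: torch.Tensor, y: torch.Tensor):
        o = torch.add(x, y)
        o = torch.sum(o, dim=self.reduction_axis, keepdim=self.keepdim)
        return o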
torch
test/test_jit_cuda_fuser.py
test_squeeze_zero
def test_squeeze_zero(self):
    x = torch.tensor(1.0, dtype=torch.float, device="cuda")

    def squeeze_0(x: torch.Tensor):
        o = x + 1.
        o = torch.squeeze(o, 0)
        o = o * 2.
        return o

    def squeeze_1(x: torch.Tensor):
        o = x + 1.
        o = torch.squeeze(o, -1)
        o = o + .5
        return o

    squeeze_0_jit = torch.jit.script(squeeze_0)
    self._run_helper(squeeze_0_jit, squeeze_0, x)
    squeeze_1_jit = torch.jit.script(squeeze_1)
    self._run_helper(squeeze_1_jit, squeeze_1, x)
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math

from torch.autograd.gradcheck import gradcheck

from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'

ALIAS_TEST_DISABLED = True

TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER

class TestCudaFuser(JitTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
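The zero-dim input is the interesting case here: `torch.squeeze` on a scalar tensor accepts `dim` 0 or -1 and is a no-op, so the fused graph must preserve rank 0. A quick check of that invariant:

import torch

# Squeezing a 0-dim tensor along dim 0 or -1 is allowed and changes nothing.
x = torch.tensor(1.0, dtype=torch.float)
assert torch.squeeze(x + 1.0, 0).shape == torch.Size([])
assert torch.squeeze(x + 1.0, -1).shape == torch.Size([])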
torch
test/test_jit_cuda_fuser.py
squeeze_0
def squeeze_0(x: torch.Tensor):
    o = x + 1.
    o = torch.squeeze(o, 0)
    o = o * 2.
    return o
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math

from torch.autograd.gradcheck import gradcheck

from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'

ALIAS_TEST_DISABLED = True

TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_jit_cuda_fuser.py
squeeze_1
def squeeze_1(x: torch.Tensor):
    o = x + 1.
    o = torch.squeeze(o, -1)
    o = o + .5
    return o

squeeze_0_jit = torch.jit.script(squeeze_0)
self._run_helper(squeeze_0_jit, squeeze_0, x)
squeeze_1_jit = torch.jit.script(squeeze_1)
self._run_helper(squeeze_1_jit, squeeze_1, x)
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math

from torch.autograd.gradcheck import gradcheck

from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'

ALIAS_TEST_DISABLED = True

TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
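`_run_helper` is defined elsewhere in `TestCudaFuser` and is not part of this record. A plausible minimal sketch, assuming it warms the scripted function through the profiling runs, compares against eager mode, and then asserts on the fusion guard:

# Sketch under stated assumptions -- not the recorded implementation.
def _run_helper(self, jit_op, op, *args):
    for _ in range(3):          # profiling executor needs warm-up runs
        jit_o = jit_op(*args)
    o = op(*args)
    self.assertEqual(o, jit_o)
    self.assertGraphContains(jit_op.graph_for(*args), FUSION_GUARD)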
torch
test/test_jit_cuda_fuser.py
t
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float):
    o_16 = torch.add(x, y)
    o_32_a = torch.add(y, z, alpha=alpha)
    o_32_b = torch.add(o_16, z)
    return (o_16, o_32_a, o_32_b)

t_jit = torch.jit.script(t)
alpha = 0.5
# stick to integers, this avoids the numerical difference due to our
# promotion
x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
jit_o = t_jit(x, y, z, alpha)
jit_o = t_jit(x, y, z, alpha)
o = t(x, y, z, alpha)
for oo, jit_oo in zip(o, jit_o):
    self.assertEqual(oo.dtype, jit_oo.dtype)
    self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings

import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
    is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np
import math

from torch.autograd.gradcheck import gradcheck

from typing import List

RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0

os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ['PYTORCH_NVFUSER_ENABLE']
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ['PYTORCH_NVFUSER_DISABLE']
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'

ALIAS_TEST_DISABLED = True

TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
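Why the test sticks to integers: 0..255 (and, with alpha=0.5, their halves) are exactly representable in float16, so eager and fused results can be compared with `assertEqual` rather than a tolerance-based check. A quick demonstration of the exactness claim:

import torch

# Integers up to 2048 and half-integers up to 1024 round-trip exactly
# through float16, so no promotion-induced rounding error can appear.
x = torch.arange(256, dtype=torch.float32)
assert torch.equal(x.to(torch.float16).to(torch.float32), x)
assert torch.equal((0.5 * x).to(torch.float16).to(torch.float32), 0.5 * x)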