entry_point (string, 1-65 chars) | original_triton_code (string, 4.5k-619k chars) | python_code (string, 208-60.9k chars) | triton_code (string, 1.15k-275k chars) | repo_name (string, 7-115 chars) | module_name (string, 1-65 chars) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (sequence, 1-6 entries) | stars (int64, 0-19.8k) | sha (string, 40 chars) | repo_link (string, 72-180 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|
CONV1d_FusionBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ft/cftolilb4hpp6x4jz373vvawpu3u2polwikl3m7dajejar4z7n57.py
# Topologically Sorted Source Nodes: [out_part_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_part_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%slice_2, %primals_2, %primals_3, [1, 1, 1], [1, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16) % 4
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2) + (64*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/uh/cuhjxm3dl2fh55vwyoyoca45ux3zu24m4hknlwots5kyc6p2w4oz.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 4
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + (x3), tmp2 & xmask, other=0.0)
tmp4 = tmp0 < tmp1
tmp5 = tl.load(in_ptr1 + (x0 + (16*x2) + (64*x1)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr2 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = 0.0
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tl.where(tmp2, tmp3, tmp11)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 2, 3, 1, 1), (6, 3, 1, 1, 1))
assert_size_stride(primals_3, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 2, 4, 4, 4), (128, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_part_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 128, grid=grid(128), stream=stream0)
# Topologically Sorted Source Nodes: [out_part_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1, 1), padding=(1, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf1, (1, 2, 4, 4, 4), (128, 64, 16, 4, 1))
del buf0
buf2 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(primals_1, buf1, primals_3, buf2, 256, grid=grid(256), stream=stream0)
del buf1
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_2, reinterpret_tensor(primals_1, (1, 2, 4, 4, 4), (256, 16, 64, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 2, 3, 1, 1), (6, 3, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.parallel
import torch.optim
import torch.nn as nn
class CONV1d_FusionBlock(nn.Module):
def __init__(self, in_channels, n_segment, n_div):
super(CONV1d_FusionBlock, self).__init__()
self.n_div = n_div
self.fold = in_channels // n_div
self.n_segment = n_segment
        self.temporal_conv = nn.Conv3d(in_channels=2 * self.fold,
            out_channels=2 * self.fold, kernel_size=(3, 1, 1),
            padding=(1, 0, 0), stride=1, bias=True)
nn.init.constant_(self.temporal_conv.weight, 0)
nn.init.constant_(self.temporal_conv.bias, 0)
def forward(self, x):
"""
:param x: (nt, c, h, w)
        :return: (nt, c, h, w)
"""
nt, c, h, w = x.size()
n_batch = nt // self.n_segment
x = x.view(n_batch, self.n_segment, c, h, w).transpose(1, 2)
out_part = x[:, :2 * self.fold]
out_part = self.temporal_conv(out_part)
out = torch.zeros_like(x)
out[:, :2 * self.fold] = out_part
out[:, 2 * self.fold:] = x[:, 2 * self.fold:]
out = out.transpose(1, 2).contiguous().view(nt, c, h, w)
return out
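# A minimal usage sketch (illustrative, not part of the original repo code;
# shapes follow get_inputs/get_init_inputs below). With in_channels=4 and
# n_div=4, fold = 1, so the Conv3d mixes only the first 2*fold = 2 channels
# along the temporal axis while the remaining channels pass through unchanged.
# Because weight and bias are zero-initialized, those two channels come out as
# zeros right after construction.
#
#   block = CONV1d_FusionBlock(in_channels=4, n_segment=4, n_div=4)
#   x = torch.rand(4, 4, 4, 4)   # (nt, c, h, w) with nt = n_batch * n_segment
#   y = block(x)                 # (nt, c, h, w)
#   assert torch.equal(y[:, 2:], x[:, 2:])   # pass-through channels untouched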
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'n_segment': 4, 'n_div': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.parallel
import torch.optim
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
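# What the two generated kernels do (inferred from the graph comments kept in
# the original_triton_code column): the first copies the leading 2*fold
# channels of the (nt, c, h, w) input into the (1, c, t, h, w) layout expected
# by the extern Conv3d call; the second adds the conv bias and stitches the
# conv result back together with the untouched channels into one contiguous
# output buffer.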
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16 % 4
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + x3, tmp2 & xmask, other=0.0)
tmp4 = tmp0 < tmp1
tmp5 = tl.load(in_ptr1 + (x0 + 16 * x2 + 64 * x1), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr2 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = 0.0
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tl.where(tmp2, tmp3, tmp11)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 2, 3, 1, 1), (6, 3, 1, 1, 1))
assert_size_stride(primals_3, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 2, 4, 4, 4), (128, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(128)](primals_1, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1, 1),
padding=(1, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf1, (1, 2, 4, 4, 4), (128, 64, 16, 4, 1))
del buf0
buf2 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_clone_1[grid(256)](primals_1, buf1, primals_3,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_3
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        primals_2,
        reinterpret_tensor(primals_1, (1, 2, 4, 4, 4), (256, 16, 64, 4, 1), 0))
class CONV1d_FusionBlockNew(nn.Module):
def __init__(self, in_channels, n_segment, n_div):
super(CONV1d_FusionBlockNew, self).__init__()
self.n_div = n_div
self.fold = in_channels // n_div
self.n_segment = n_segment
        self.temporal_conv = nn.Conv3d(in_channels=2 * self.fold,
            out_channels=2 * self.fold, kernel_size=(3, 1, 1),
            padding=(1, 0, 0), stride=1, bias=True)
nn.init.constant_(self.temporal_conv.weight, 0)
nn.init.constant_(self.temporal_conv.bias, 0)
def forward(self, input_0):
primals_2 = self.temporal_conv.weight
primals_3 = self.temporal_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
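# The *New class is the dataset's compiled drop-in replacement for each
# module: forward() collects the module's parameters, routes them through the
# generated call(), and returns output[0]; the remaining outputs of call()
# are tensors saved for the backward pass.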
| RongchangLi/DEN | CONV1d_FusionBlock | false | 17,884 | ["MIT"] | 4 | f8b744f96a3a68cf0784080ffd561a5279715727 | https://github.com/RongchangLi/DEN/tree/f8b744f96a3a68cf0784080ffd561a5279715727 |
channel_attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/6n/c6nha2q3zyxvtg2jmtvcuoiccmtryqlajcha5a7wl5jnmf6wzrgu.py
# Topologically Sorted Source Nodes: [c_f_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# c_f_1 => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_2,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_7, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6i/c6ifoixzpo3n2enwqmjfjrkkbhhcdfacno2472g5fqwfepv4e3a3.py
# Topologically Sorted Source Nodes: [c_f_2], Original ATen: [aten.view]
# Source node to ATen node mapping:
# c_f_2 => view_8
# Graph fragment:
# %view_8 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_7, [16, 4]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/md/cmdnrivqr7bgqxtusri5667sknnrds42hz4cq2jhrskvam3wgznk.py
# Topologically Sorted Source Nodes: [add, channel_attention_weight], Original ATen: [aten.add, aten._softmax]
# Source node to ATen node mapping:
# add => add
# channel_attention_weight => amax, exp, sub, sum_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_4), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_poi_fused__softmax_add_2 = async_compile.triton('triton_poi_fused__softmax_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (3))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp12 = tmp9 + tmp11
tmp13 = triton_helpers.maximum(tmp8, tmp12)
tmp17 = tmp14 + tmp16
tmp18 = triton_helpers.maximum(tmp13, tmp17)
tmp19 = tmp3 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp7 - tmp18
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp12 - tmp18
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp18
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tl.store(out_ptr0 + (x0), tmp18, xmask)
tl.store(out_ptr1 + (x0), tmp29, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/pl/cplasiznwgsc56vrvd6k2h3knetgnrb5fdansh72ckpcx2m5r345.py
# Topologically Sorted Source Nodes: [add, channel_attention_weight], Original ATen: [aten.add, aten._softmax]
# Source node to ATen node mapping:
# add => add
# channel_attention_weight => amax, div, exp, sub
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_4), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_add_3 = async_compile.triton('triton_poi_fused__softmax_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [c_f], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), reinterpret_tensor(primals_2, (16, 4), (1, 16), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [c_f_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, buf7, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [c_f_2], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
del buf1
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [c_f_2], Original ATen: [aten.mm]
extern_kernels.mm(buf2, reinterpret_tensor(primals_3, (4, 1), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [add, channel_attention_weight], Original ATen: [aten.add, aten._softmax]
triton_poi_fused__softmax_add_2.run(buf3, primals_4, buf4, buf5, 4, grid=grid(4), stream=stream0)
buf6 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [add, channel_attention_weight], Original ATen: [aten.add, aten._softmax]
triton_poi_fused__softmax_add_3.run(buf6, primals_4, buf4, buf5, 16, grid=grid(16), stream=stream0)
del buf4
del buf5
del primals_4
return (buf6, reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), buf2, buf6, primals_3, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class channel_attention(nn.Module):
def __init__(self, in_channels, feature_size):
super(channel_attention, self).__init__()
self.fc1 = nn.Linear(feature_size * feature_size, feature_size,
bias=False)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(feature_size, 1, bias=False)
self.bias = nn.Parameter(torch.zeros(in_channels))
self.softmax = nn.Softmax()
def forward(self, target_feature):
b, c, h, w = target_feature.shape
target_feature_resize = target_feature.view(b, c, h * w)
c_f = self.fc1(target_feature_resize)
c_f = self.relu1(c_f)
c_f = self.fc2(c_f)
c_f = c_f.view(b, c)
channel_attention_weight = self.softmax(c_f + self.bias)
return channel_attention_weight
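# A minimal usage sketch (illustrative, not from the original repo; shapes
# follow get_inputs/get_init_inputs below). fc1 maps h*w -> feature_size, so
# the input must satisfy h == w == feature_size:
#   attn = channel_attention(in_channels=4, feature_size=4)
#   feat = torch.rand(4, 4, 4, 4)        # (b, c, h, w)
#   w = attn(feat)                       # (b, c); each row sums to 1
# Note: nn.Softmax() is constructed without an explicit dim, so PyTorch picks
# the implicit dimension (dim=1 for the 2-D input here) and emits a warning.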
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'feature_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
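# Kernel roles (inferred from the graph comments kept in the
# original_triton_code column): _0 applies ReLU in place and records the <= 0
# mask for the backward pass, _1 materializes the strided view feeding fc2,
# and _2/_3 implement a numerically stable softmax over (c_f + bias): _2
# computes each row's max and sum of exponentials, _3 normalizes in place.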
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
    tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp12 = tmp9 + tmp11
tmp13 = triton_helpers.maximum(tmp8, tmp12)
tmp17 = tmp14 + tmp16
tmp18 = triton_helpers.maximum(tmp13, tmp17)
tmp19 = tmp3 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp7 - tmp18
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp12 - tmp18
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp18
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tl.store(out_ptr0 + x0, tmp18, xmask)
tl.store(out_ptr1 + x0, tmp29, xmask)
@triton.jit
def triton_poi_fused__softmax_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(in_out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 16), (16, 1),
0), reinterpret_tensor(primals_2, (16, 4), (1, 16), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, buf7, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf1
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_3, (4, 1), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused__softmax_add_2[grid(4)](buf3, primals_4, buf4,
buf5, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0)
del buf3
triton_poi_fused__softmax_add_3[grid(16)](buf6, primals_4, buf4,
buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del buf5
del primals_4
    return (buf6, reinterpret_tensor(primals_1, (16, 16), (16, 1), 0),
        buf2, buf6, primals_3, buf7)
class channel_attentionNew(nn.Module):
def __init__(self, in_channels, feature_size):
super(channel_attentionNew, self).__init__()
self.fc1 = nn.Linear(feature_size * feature_size, feature_size,
bias=False)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(feature_size, 1, bias=False)
self.bias = nn.Parameter(torch.zeros(in_channels))
self.softmax = nn.Softmax()
def forward(self, input_0):
primals_4 = self.bias
primals_2 = self.fc1.weight
primals_3 = self.fc2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| SCUT-AILab/AFA | channel_attention | false | 17,885 | ["BSD-3-Clause"] | 7 | acfb42236ce0114d63f22a821fc5954c8c149f45 | https://github.com/SCUT-AILab/AFA/tree/acfb42236ce0114d63f22a821fc5954c8c149f45 |
MLPSoftQNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/zm/czm6acrrgjryz6xi3wza7npycjuiqsdsygpfdo3lbzaquecrmeuj.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/7w/c7w7ir6ad5ntpdgwrvg4l7aedbo6q2o3b6f2u354xv322n3olypk.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 5600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 1400
x1 = (xindex // 1400)
tmp0 = tl.load(in_out_ptr0 + (x0 + (1408*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0 + (1408*x1)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vj/cvjj7xafezdomxu5l2gnnds6tywircz4upc4yzifxu5gml44vage.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_2 => relu_1
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_6), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/gr/cgr64ocnajtn5pia3ej2wsbfnpd2hjo5xeejpgs3m4k3hmyjuuur.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_8), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_3 = async_compile.triton('triton_poi_fused_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1400, 8), (8, 1))
assert_size_stride(primals_4, (1400, ), (1, ))
assert_size_stride(primals_5, (1024, 1400), (1400, 1))
assert_size_stride(primals_6, (1024, ), (1, ))
assert_size_stride(primals_7, (256, 1024), (1024, 1))
assert_size_stride(primals_8, (256, ), (1, ))
assert_size_stride(primals_9, (1, 256), (256, 1))
assert_size_stride(primals_10, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 1400), (1408, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 1400), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, primals_4, 5600, grid=grid(5600), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (1400, 1024), (1, 1400), 0), out=buf3)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf4, primals_6, 4096, grid=grid(4096), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (1024, 256), (1, 1024), 0), out=buf5)
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_3.run(buf6, primals_8, 1024, grid=grid(1024), stream=stream0)
del primals_8
buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, buf6, reinterpret_tensor(primals_9, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf8)
del primals_10
return (buf8, buf0, buf2, buf4, buf6, primals_9, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1400, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1024, 1400), (1400, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MLPSoftQNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size1=1400,
hidden_size2=1024, hidden_size3=256, init_w=0.003):
super(MLPSoftQNetwork, self).__init__()
self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size1)
self.linear2 = nn.Linear(hidden_size1, hidden_size2)
self.linear3 = nn.Linear(hidden_size2, hidden_size3)
self.linear4 = nn.Linear(hidden_size3, 1)
self.linear4.weight.data.uniform_(-init_w, init_w)
self.linear4.bias.data.uniform_(-init_w, init_w)
def forward(self, state, action):
x = torch.cat([state, action], 1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = F.relu(self.linear3(x))
x = self.linear4(x)
return x
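# A minimal usage sketch (illustrative, not from the original repo; shapes
# follow get_inputs/get_init_inputs below):
#   qnet = MLPSoftQNetwork(num_inputs=4, num_actions=4)
#   state, action = torch.rand(4, 4), torch.rand(4, 4)
#   q = qnet(state, action)              # (4, 1): one Q-value per (s, a) pair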
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_actions': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
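# Kernel roles: _cat_0 fuses torch.cat([state, action], 1); _relu_1/_2/_3 are
# the bias-add + ReLU epilogues applied in place after each extern_kernels.mm
# (the matmuls themselves stay in the extern BLAS path, typically cuBLAS;
# only the epilogue runs in Triton).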
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 5600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 1400
x1 = xindex // 1400
tmp0 = tl.load(in_out_ptr0 + (x0 + 1408 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0 + 1408 * x1), tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1400, 8), (8, 1))
assert_size_stride(primals_4, (1400,), (1,))
assert_size_stride(primals_5, (1024, 1400), (1400, 1))
assert_size_stride(primals_6, (1024,), (1,))
assert_size_stride(primals_7, (256, 1024), (1024, 1))
assert_size_stride(primals_8, (256,), (1,))
assert_size_stride(primals_9, (1, 256), (256, 1))
assert_size_stride(primals_10, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 1400), (1408, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 1400),
            (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
        triton_poi_fused_relu_1[grid(5600)](buf2, primals_4, 5600,
            XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (1400, 1024),
(1, 1400), 0), out=buf3)
buf4 = buf3
del buf3
        triton_poi_fused_relu_2[grid(4096)](buf4, primals_6, 4096,
            XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (1024, 256),
(1, 1024), 0), out=buf5)
buf6 = buf5
del buf5
        triton_poi_fused_relu_3[grid(1024)](buf6, primals_8, 1024,
            XBLOCK=256, num_warps=4, num_stages=1)
del primals_8
buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_10, buf6, reinterpret_tensor(primals_9,
(256, 1), (1, 256), 0), alpha=1, beta=1, out=buf8)
del primals_10
return buf8, buf0, buf2, buf4, buf6, primals_9, primals_7, primals_5
class MLPSoftQNetworkNew(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size1=1400,
hidden_size2=1024, hidden_size3=256, init_w=0.003):
super(MLPSoftQNetworkNew, self).__init__()
self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size1)
self.linear2 = nn.Linear(hidden_size1, hidden_size2)
self.linear3 = nn.Linear(hidden_size2, hidden_size3)
self.linear4 = nn.Linear(hidden_size3, 1)
self.linear4.weight.data.uniform_(-init_w, init_w)
self.linear4.bias.data.uniform_(-init_w, init_w)
def forward(self, input_0, input_1):
primals_3 = self.linear1.weight
primals_4 = self.linear1.bias
primals_5 = self.linear2.weight
primals_6 = self.linear2.bias
primals_7 = self.linear3.weight
primals_8 = self.linear3.bias
primals_9 = self.linear4.weight
primals_10 = self.linear4.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
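# Hypothetical usage sketch (shapes inferred from the asserts in call(); the
# constructor arguments and variable names here are illustrative only):
# q_net = MLPSoftQNetworkNew(num_inputs=4, num_actions=4).cuda()
# q = q_net(torch.rand(4, 4, device='cuda'), torch.rand(4, 4, device='cuda'))
# # q has shape (4, 1): one soft Q-value per (state, action) row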
| SAMMiCA/DL_based_E2E_Driving | MLPSoftQNetwork | false | 17,886 | [
"MIT"
] | 4 | 01f7d74a0db7ed745cf27b9a1ebab0246015ecbd | https://github.com/SAMMiCA/DL_based_E2E_Driving/tree/01f7d74a0db7ed745cf27b9a1ebab0246015ecbd |
Fusion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/37/c37l7ezffklnfomgas7mto2m3yaztb5ikq2a6o2uoa4usmb6uodq.py
# Topologically Sorted Source Nodes: [sub, pow_1, neg, add, relu, add_1], Original ATen: [aten.sub, aten.pow, aten.neg, aten.add, aten.relu]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# neg => neg
# pow_1 => pow_1
# relu => relu
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %relu), kwargs = {})
triton_poi_fused_add_neg_pow_relu_sub_0 = async_compile.triton('triton_poi_fused_add_neg_pow_relu_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_neg_pow_relu_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_neg_pow_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = -tmp3
tmp5 = tmp0 + tmp1
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp4 + tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
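# The single fused kernel above computes out = -(x - y)**2 + relu(x + y)
# elementwise in one pass, so no intermediate buffers for sub/pow/neg/add survive.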
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, pow_1, neg, add, relu, add_1], Original ATen: [aten.sub, aten.pow, aten.neg, aten.add, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_add_neg_pow_relu_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Fusion(nn.Module):
""" Crazy multi-modal fusion: negative squared difference minus relu'd sum
"""
def __init__(self):
super().__init__()
def forward(self, x, y):
return -(x - y) ** 2 + torch.relu(x + y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
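# Minimal usage sketch (shapes follow get_inputs(); illustrative only):
# x, y = get_inputs()
# out = Fusion()(x, y)  # elementwise -(x - y)**2 + relu(x + y), shape (4, 4, 4, 4)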
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_neg_pow_relu_sub_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = -tmp3
tmp5 = tmp0 + tmp1
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp4 + tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_neg_pow_relu_sub_0[grid(256)](arg0_1, arg1_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class FusionNew(nn.Module):
""" Crazy multi-modal fusion: negative squared difference minus relu'd sum
"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
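# Usage sketch: identical math to Fusion, but the inputs must be contiguous CUDA
# tensors of shape (4, 4, 4, 4) to satisfy the asserts in call() (illustrative):
# out = FusionNew()(torch.rand(4, 4, 4, 4, device='cuda'),
#                   torch.rand(4, 4, 4, 4, device='cuda'))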
| Ruiver/CTCNet | Fusion | false | 17,887 | [
"Apache-2.0"
] | 6 | 539e55ec9fed06028379d35dfd5cd4074755ffd8 | https://github.com/Ruiver/CTCNet/tree/539e55ec9fed06028379d35dfd5cd4074755ffd8 |
EncoderBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/4q/c4qjrjc7o65zi2bx4s56535druk3gzpl7fwivhbighhdv4zvnmpb.py
# Topologically Sorted Source Nodes: [conv3d, instance_norm, x], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# conv3d => convolution
# instance_norm => add, rsqrt, var_mean
# x => gt, mul_1, where
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_2, [1, 1, 1], [4, 4, 4], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%unsqueeze_1, [0, 2, 3, 4]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze_3, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_3, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze_3, %mul_1), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze_6, 0), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
xnumel = 4
XBLOCK: tl.constexpr = 1
rnumel = 729
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (r1 + (729*x0)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.where(rmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [RBLOCK])
tmp8 = tl.where(rmask, tmp6, 0)
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp10 = tl.full([1], 729, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = tl.where(rmask, tmp15, 0)
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp19 = tmp2 - tmp12
tmp20 = 729.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tmp31 = tmp30 > tmp26
tl.store(in_out_ptr0 + (r1 + (729*x0)), tmp2, rmask)
tl.store(out_ptr2 + (r1 + (729*x0)), tmp30, rmask)
tl.store(out_ptr3 + (r1 + (729*x0)), tmp31, rmask)
tl.store(out_ptr4 + (x0), tmp24, None)
tl.store(out_ptr0 + (x0), tmp12, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/4i/c4inbvxhb66d4ojgvjcmduhl4m6ohmjo6w5bf4rgnlouhc3yonyr.py
# Topologically Sorted Source Nodes: [conv3d_1, instance_norm_1, x_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# conv3d_1 => convolution_1
# instance_norm_1 => add_1, rsqrt_1, var_mean_1
# x_1 => gt_1, mul_3, where_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_4, %primals_4, %primals_5, [1, 1, 1], [4, 4, 4], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%unsqueeze_5, [0, 2, 3, 4]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze_10, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_10, 0.01), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %squeeze_10, %mul_3), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze_15, 0), kwargs = {})
triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1 = async_compile.triton('triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[4, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 4
rnumel = 2744
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_out_ptr0 + (r1 + (2744*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce(
tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r1 + (2744*x0)), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(
tmp4_mean, tmp4_m2, tmp4_weight, 1
)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tl.store(out_ptr0 + (x0), tmp4, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp7 = tl.load(in_out_ptr0 + (r1 + (2744*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp8 = tmp7 - tmp4
tmp9 = 2744.0
tmp10 = tmp5 / tmp9
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tmp13 = libdevice.rsqrt(tmp12)
tmp14 = tmp8 * tmp13
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = 0.01
tmp18 = tmp14 * tmp17
tmp19 = tl.where(tmp16, tmp14, tmp18)
tmp20 = tmp19 > tmp15
tl.store(out_ptr2 + (r1 + (2744*x0)), tmp19, rmask & xmask)
tl.store(out_ptr3 + (r1 + (2816*x0)), tmp20, rmask & xmask)
tmp21 = 2744.0
tmp22 = tmp5 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tl.store(out_ptr4 + (x0), tmp25, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 9, 9, 9), (2916, 729, 81, 9, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.float32)
buf6 = empty_strided_cuda((4, 9, 9, 9), (729, 81, 9, 1), torch.float32)
buf15 = empty_strided_cuda((4, 9, 9, 9), (729, 81, 9, 1), torch.bool)
buf5 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [conv3d, instance_norm, x], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.leaky_relu_backward]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_0.run(buf1, primals_2, buf2, buf6, buf15, buf5, 4, 729, grid=grid(4), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv3d_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 9, 9, 9), (0, 729, 81, 9, 1), 0), primals_4, stride=(1, 1, 1), padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 4, 14, 14, 14), (10976, 2744, 196, 14, 1))
buf8 = buf7; del buf7 # reuse
buf9 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.float32)
buf13 = empty_strided_cuda((4, 14, 14, 14), (2744, 196, 14, 1), torch.float32)
buf14 = empty_strided_cuda((4, 14, 14, 14), (2816, 196, 14, 1), torch.bool)
buf12 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [conv3d_1, instance_norm_1, x_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.leaky_relu_backward]
triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1.run(buf8, primals_5, buf9, buf13, buf14, buf12, 4, 2744, grid=grid(4), stream=stream0)
del primals_5
return (buf13, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf1, reinterpret_tensor(buf5, (4, ), (1, ), 0), reinterpret_tensor(buf6, (1, 4, 9, 9, 9), (2916, 729, 81, 9, 1), 0), buf8, reinterpret_tensor(buf12, (4, ), (1, ), 0), buf14, reinterpret_tensor(buf9, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0), buf15, reinterpret_tensor(buf2, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch import nn
class EncoderBlock(nn.Module):
"""
Encoder block class
"""
def __init__(self, in_channels, out_channels, k_size, pad_size):
super(EncoderBlock, self).__init__()
self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=
k_size, padding=pad_size)
self.conv2 = nn.Conv3d(out_channels, out_channels, kernel_size=
k_size, padding=pad_size)
self.IN1 = nn.InstanceNorm3d(out_channels)
self.IN2 = nn.InstanceNorm3d(out_channels)
def forward(self, x):
x = F.leaky_relu(self.IN1(self.conv1(x)), inplace=True)
x = F.leaky_relu(self.IN2(self.conv2(x)), inplace=True)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'k_size': 4,
'pad_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_0(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
rnumel = 729
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (r1 + 729 * x0), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tl.where(rmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [RBLOCK])
tmp8 = tl.where(rmask, tmp6, 0)
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp10 = tl.full([1], 729, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = tl.where(rmask, tmp15, 0)
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp19 = tmp2 - tmp12
tmp20 = 729.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tmp31 = tmp30 > tmp26
tl.store(in_out_ptr0 + (r1 + 729 * x0), tmp2, rmask)
tl.store(out_ptr2 + (r1 + 729 * x0), tmp30, rmask)
tl.store(out_ptr3 + (r1 + 729 * x0), tmp31, rmask)
tl.store(out_ptr4 + x0, tmp24, None)
tl.store(out_ptr0 + x0, tmp12, None)
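# The reduction kernel below handles the larger (4, 14, 14, 14) activation: it
# streams over 2744 elements per channel with a Welford update to accumulate
# mean and M2, then makes a second pass to normalize, apply LeakyReLU(0.01),
# and store the >0 mask that the LeakyReLU backward reuses.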
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 4
rnumel = 2744
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_out_ptr0 + (r1 + 2744 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r1 + 2744 * x0), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6_tmp[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp7 = tl.load(in_out_ptr0 + (r1 + 2744 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp8 = tmp7 - tmp4
tmp9 = 2744.0
tmp10 = tmp5 / tmp9
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tmp13 = libdevice.rsqrt(tmp12)
tmp14 = tmp8 * tmp13
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = 0.01
tmp18 = tmp14 * tmp17
tmp19 = tl.where(tmp16, tmp14, tmp18)
tmp20 = tmp19 > tmp15
tl.store(out_ptr2 + (r1 + 2744 * x0), tmp19, rmask & xmask)
tl.store(out_ptr3 + (r1 + 2816 * x0), tmp20, rmask & xmask)
tmp21 = 2744.0
tmp22 = tmp5 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tl.store(out_ptr4 + x0, tmp25, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 9, 9, 9), (2916, 729, 81, 9, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.
float32)
buf6 = empty_strided_cuda((4, 9, 9, 9), (729, 81, 9, 1), torch.float32)
buf15 = empty_strided_cuda((4, 9, 9, 9), (729, 81, 9, 1), torch.bool)
buf5 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.
float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_0[
grid(4)](buf1, primals_2, buf2, buf6, buf15, buf5, 4, 729,
num_warps=8, num_stages=1)
del primals_2
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 9,
9, 9), (0, 729, 81, 9, 1), 0), primals_4, stride=(1, 1, 1),
padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 4, 14, 14, 14), (10976, 2744, 196, 14, 1))
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.
float32)
buf13 = empty_strided_cuda((4, 14, 14, 14), (2744, 196, 14, 1),
torch.float32)
buf14 = empty_strided_cuda((4, 14, 14, 14), (2816, 196, 14, 1),
torch.bool)
buf12 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.
float32)
triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1[
grid(4)](buf8, primals_5, buf9, buf13, buf14, buf12, 4, 2744,
XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del primals_5
return buf13, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4,
4, 4, 4), (256, 64, 16, 4, 1), 0), buf1, reinterpret_tensor(buf5, (
4,), (1,), 0), reinterpret_tensor(buf6, (1, 4, 9, 9, 9), (2916, 729,
81, 9, 1), 0), buf8, reinterpret_tensor(buf12, (4,), (1,), 0
), buf14, reinterpret_tensor(buf9, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0
), buf15, reinterpret_tensor(buf2, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0)
class EncoderBlockNew(nn.Module):
"""
Encoder block class
"""
def __init__(self, in_channels, out_channels, k_size, pad_size):
super(EncoderBlockNew, self).__init__()
self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=
k_size, padding=pad_size)
self.conv2 = nn.Conv3d(out_channels, out_channels, kernel_size=
k_size, padding=pad_size)
self.IN1 = nn.InstanceNorm3d(out_channels)
self.IN2 = nn.InstanceNorm3d(out_channels)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
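# Usage sketch (illustrative; the input must be a CUDA tensor of shape (4, 4, 4, 4)):
# block = EncoderBlockNew(in_channels=4, out_channels=4, k_size=4, pad_size=4).cuda()
# y = block(torch.rand(4, 4, 4, 4, device='cuda'))  # -> (4, 14, 14, 14)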
| SVRTK/Segmentation_FetalMRI | EncoderBlock | false | 17,888 | [
"Apache-2.0"
] | 6 | 9344a2248cbe8e4cccbe05ca98214626dcf62805 | https://github.com/SVRTK/Segmentation_FetalMRI/tree/9344a2248cbe8e4cccbe05ca98214626dcf62805 |
pixel_attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/vu/cvuk26doyygvttg2zp55bryzzvez7ugdrqavksjocjhjxdz7aa6d.py
# Topologically Sorted Source Nodes: [p_f], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# p_f => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [1]), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/wp/cwpczflhtyj2omgfyg2hqvwplm5pdnymetmidlgg4y7t4zrdzeow.py
# Topologically Sorted Source Nodes: [p_f_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# p_f_2 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mm,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/gj/cgjj73dwshjqw75xlonnnznvqzixgmi4ffejc5sda6n733bqr3zr.py
# Topologically Sorted Source Nodes: [pixel_attention_weight], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# pixel_attention_weight => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_2 = async_compile.triton('triton_per_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [p_f], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [p_f_1], Original ATen: [aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (16, 4), (1, 16), 0), out=buf1)
del primals_2
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [p_f_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [p_f_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf2, reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_4
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [pixel_attention_weight], Original ATen: [aten._softmax]
triton_per_fused__softmax_2.run(buf3, buf6, 4, 16, grid=grid(4), stream=stream0)
del buf3
return (reinterpret_tensor(buf6, (4, 1, 16), (16, 16, 1), 0), buf0, buf2, buf6, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class pixel_attention(nn.Module):
def __init__(self, in_channels, feature_size):
super(pixel_attention, self).__init__()
self.fc1 = nn.Linear(feature_size * feature_size, feature_size,
bias=False)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(feature_size, feature_size * feature_size,
bias=True)
self.softmax = nn.Softmax()
def forward(self, target_feature):
b, c, h, w = target_feature.shape
target_feature_resize = target_feature.view(b, c, h * w)
p_f = torch.mean(target_feature_resize, dim=1)
p_f = self.fc1(p_f)
p_f = self.relu1(p_f)
p_f = self.fc2(p_f)
p_f = p_f.view(b, h * w)
pixel_attention_weight = self.softmax(p_f)
pixel_attention_weight = pixel_attention_weight.reshape(b, 1, h * w)
return pixel_attention_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'feature_size': 4}]
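# Illustrative usage (shapes per get_inputs/get_init_inputs):
# pa = pixel_attention(in_channels=4, feature_size=4)
# w = pa(torch.rand(4, 4, 4, 4))  # -> (4, 1, 16); each row sums to 1 over h*w pixels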
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
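# Numerically stable row softmax: subtract the per-row max before exp, then
# divide by the row sum, all held in registers for the 16-wide rows.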
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (16, 4), (1,
16), 0), out=buf1)
del primals_2
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(16)](buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_4, buf2, reinterpret_tensor(primals_3,
(4, 16), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_4
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_per_fused__softmax_2[grid(4)](buf3, buf6, 4, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf3
return reinterpret_tensor(buf6, (4, 1, 16), (16, 16, 1), 0
), buf0, buf2, buf6, primals_3
class pixel_attentionNew(nn.Module):
def __init__(self, in_channels, feature_size):
super(pixel_attentionNew, self).__init__()
self.fc1 = nn.Linear(feature_size * feature_size, feature_size,
bias=False)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(feature_size, feature_size * feature_size,
bias=True)
self.softmax = nn.Softmax()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc2.weight
primals_4 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
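# Usage sketch (illustrative; expects a contiguous CUDA tensor):
# pa = pixel_attentionNew(in_channels=4, feature_size=4).cuda()
# w = pa(torch.rand(4, 4, 4, 4, device='cuda'))  # -> (4, 1, 16) attention weights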
| SCUT-AILab/AFA | pixel_attention | false | 17,889 | [
"BSD-3-Clause"
] | 7 | acfb42236ce0114d63f22a821fc5954c8c149f45 | https://github.com/SCUT-AILab/AFA/tree/acfb42236ce0114d63f22a821fc5954c8c149f45 |
QREmbeddingBag | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/io/cioiwqy3wp264p67oelqrbepb5du3csy5v4xngdxxvd35a5l5wgp.py
# Topologically Sorted Source Nodes: [embed_q], Original ATen: [aten.arange]
# Source node to ATen node mapping:
# embed_q => iota
# Graph fragment:
# %iota : [num_users=3] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 4, dtype: torch.int64, device: cuda:0, requires_grad: False})
triton_poi_fused_arange_0 = async_compile.triton('triton_poi_fused_arange_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 4*x0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
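# The kernel above emits offsets [0, 4, 8, 12]: four bags of four indices each
# for the _embedding_bag calls below (mode=1, i.e. mean pooling within a bag).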
# kernel path: runs/run_shard_2/inductor_cache/yw/cyw3ikao3dwuy2ydxnihi2xzqgx3v3ytlsenroz34x25sbm2vmqu.py
# Topologically Sorted Source Nodes: [truediv, input_q, remainder, input_r], Original ATen: [aten.div, aten._to_copy, aten.remainder]
# Source node to ATen node mapping:
# input_q => convert_element_type
# input_r => convert_element_type_1
# remainder => remainder
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, 4), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%div, torch.int64), kwargs = {})
# %remainder : [num_users=1] = call_function[target=torch.ops.aten.remainder.Scalar](args = (%primals_1, 4), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%remainder, torch.int64), kwargs = {})
triton_poi_fused__to_copy_div_remainder_1 = async_compile.triton('triton_poi_fused__to_copy_div_remainder_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_div_remainder_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_div_remainder_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = tmp2.to(tl.int64)
tmp4 = 4.0
tmp5 = tmp0 % tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = tmp5 != tmp6
tmp8 = libdevice.signbit(tmp5) if (tmp5).dtype is tl.float32 else tmp5 < 0
tmp9 = libdevice.signbit(tmp4) if (tmp4).dtype is tl.float32 else tmp4 < 0
tmp10 = tmp8 != tmp9
tmp11 = tmp7 & tmp10
tmp12 = tmp5 + tmp4
tmp13 = tl.where(tmp11, tmp12, tmp5)
tmp14 = tmp13.to(tl.int64)
tl.store(out_ptr0 + (x0), tmp3, xmask)
tl.store(out_ptr1 + (x0), tmp14, xmask)
''', device_str='cuda')
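# Quotient-remainder embedding trick: each raw index is split into a quotient
# (idx // 4, computed as multiply-by-0.25 then int cast) and a remainder
# (idx % 4 with sign correction), so two small tables stand in for one large
# embedding; their per-bag outputs are combined elementwise by the mul kernel.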
# kernel path: runs/run_shard_2/inductor_cache/td/ctdw74aytfsdbe5fr6emgyz3tz73totnee6z3gq25i3oaga4i5pu.py
# Topologically Sorted Source Nodes: [embed], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# embed => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %getitem_4), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [embed_q], Original ATen: [aten.arange]
stream0 = get_raw_stream(0)
triton_poi_fused_arange_0.run(buf0, 4, grid=grid(4), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [truediv, input_q, remainder, input_r], Original ATen: [aten.div, aten._to_copy, aten.remainder]
triton_poi_fused__to_copy_div_remainder_1.run(primals_1, buf1, buf7, 16, grid=grid(16), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [embed_q], Original ATen: [aten._embedding_bag]
buf2 = torch.ops.aten._embedding_bag.default(primals_2, reinterpret_tensor(buf1, (16, ), (1, ), 0), buf0, False, 1)
del primals_2
buf3 = buf2[0]
buf4 = buf2[1]
buf5 = buf2[2]
buf6 = buf2[3]
del buf2
# Topologically Sorted Source Nodes: [embed_r], Original ATen: [aten._embedding_bag]
buf8 = torch.ops.aten._embedding_bag.default(primals_3, reinterpret_tensor(buf7, (16, ), (1, ), 0), buf0, False, 1)
del primals_3
buf9 = buf8[0]
buf10 = buf8[1]
buf11 = buf8[2]
buf12 = buf8[3]
del buf8
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embed], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf3, buf9, buf13, 16, grid=grid(16), stream=stream0)
return (buf13, buf0, reinterpret_tensor(buf1, (16, ), (1, ), 0), buf3, buf4, buf5, buf6, reinterpret_tensor(buf7, (16, ), (1, ), 0), buf9, buf10, buf11, buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class QREmbeddingBag(nn.Module):
"""Computes sums or means over two 'bags' of embeddings, one using the quotient
of the indices and the other using the remainder of the indices, without
instantiating the intermediate embeddings, then performs an operation to combine these.
For bags of constant length and no :attr:`per_sample_weights`, this class
* with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=0)``,
* with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=0)``,
* with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=0)``.
However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these
operations.
QREmbeddingBag also supports per-sample weights as an argument to the forward
pass. This scales the output of the Embedding before performing a weighted
    reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the
only supported ``mode`` is ``"sum"``, which computes a weighted sum according to
:attr:`per_sample_weights`.
Known Issues:
Autograd breaks with multiple GPUs. It breaks only with multiple embeddings.
Args:
num_categories (int): total number of unique categories. The input indices must be in
0, 1, ..., num_categories - 1.
embedding_dim (list): list of sizes for each embedding vector in each table. If ``"add"``
or ``"mult"`` operation are used, these embedding dimensions must be
the same. If a single embedding_dim is used, then it will use this
embedding_dim for both embedding tables.
num_collisions (int): number of collisions to enforce.
        operation (string, optional): ``"concat"``, ``"add"``, or ``"mult"``. Specifies the operation
to compose embeddings. ``"concat"`` concatenates the embeddings,
``"add"`` sums the embeddings, and ``"mult"`` multiplies
(component-wise) the embeddings.
Default: ``"mult"``
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
is renormalized to have norm :attr:`max_norm`.
norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the inverse of frequency of
the words in the mini-batch. Default ``False``.
Note: this option is not supported when ``mode="max"``.
mode (string, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights`
into consideration. ``"mean"`` computes the average of the values
in the bag, ``"max"`` computes the max value over each bag.
Default: ``"mean"``
sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See
Notes for more details regarding sparse gradients. Note: this option is not
supported when ``mode="max"``.
Attributes:
        weight (Tensor): the learnable weights of each embedding table of the module, of shape
`(num_embeddings, embedding_dim)` initialized using a uniform distribution
with sqrt(1 / num_categories).
Inputs: :attr:`input` (LongTensor), :attr:`offsets` (LongTensor, optional), and
        :attr:`per_sample_weights` (Tensor, optional)
- If :attr:`input` is 2D of shape `(B, N)`,
it will be treated as ``B`` bags (sequences) each of fixed length ``N``, and
this will return ``B`` values aggregated in a way depending on the :attr:`mode`.
:attr:`offsets` is ignored and required to be ``None`` in this case.
- If :attr:`input` is 1D of shape `(N)`,
it will be treated as a concatenation of multiple bags (sequences).
:attr:`offsets` is required to be a 1D tensor containing the
starting index positions of each bag in :attr:`input`. Therefore,
for :attr:`offsets` of shape `(B)`, :attr:`input` will be viewed as
having ``B`` bags. Empty bags (i.e., having 0-length) will have
          their returned vectors filled with zeros.
per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights`
must have exactly the same shape as input and is treated as having the same
:attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``.
Output shape: `(B, embedding_dim)`
"""
__constants__ = ['num_categories', 'embedding_dim', 'num_collisions',
'operation', 'max_norm', 'norm_type', 'scale_grad_by_freq', 'mode',
'sparse']
def __init__(self, num_categories, embedding_dim, num_collisions,
operation='mult', max_norm=None, norm_type=2.0, scale_grad_by_freq=
False, mode='mean', sparse=False, _weight=None):
super(QREmbeddingBag, self).__init__()
assert operation in ['concat', 'mult', 'add'], 'Not valid operation!'
self.num_categories = num_categories
if isinstance(embedding_dim, int) or len(embedding_dim) == 1:
self.embedding_dim = [embedding_dim, embedding_dim]
else:
self.embedding_dim = embedding_dim
self.num_collisions = num_collisions
self.operation = operation
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
if self.operation == 'add' or self.operation == 'mult':
assert self.embedding_dim[0] == self.embedding_dim[1
], 'Embedding dimensions do not match!'
self.num_embeddings = [int(np.ceil(num_categories / num_collisions)
), num_collisions]
if _weight is None:
self.weight_q = Parameter(torch.Tensor(self.num_embeddings[0],
self.embedding_dim[0]))
self.weight_r = Parameter(torch.Tensor(self.num_embeddings[1],
self.embedding_dim[1]))
self.reset_parameters()
else:
assert list(_weight[0].shape) == [self.num_embeddings[0], self.
embedding_dim[0]
], 'Shape of weight for quotient table does not match num_embeddings and embedding_dim'
assert list(_weight[1].shape) == [self.num_embeddings[1], self.
embedding_dim[1]
], 'Shape of weight for remainder table does not match num_embeddings and embedding_dim'
self.weight_q = Parameter(_weight[0])
self.weight_r = Parameter(_weight[1])
self.mode = mode
self.sparse = sparse
def reset_parameters(self):
nn.init.uniform_(self.weight_q, np.sqrt(1 / self.num_categories))
nn.init.uniform_(self.weight_r, np.sqrt(1 / self.num_categories))
def forward(self, input, offsets=None, per_sample_weights=None):
input_q = (input / self.num_collisions).long()
input_r = torch.remainder(input, self.num_collisions).long()
embed_q = F.embedding_bag(input_q, self.weight_q, offsets, self.
max_norm, self.norm_type, self.scale_grad_by_freq, self.mode,
self.sparse, per_sample_weights)
embed_r = F.embedding_bag(input_r, self.weight_r, offsets, self.
max_norm, self.norm_type, self.scale_grad_by_freq, self.mode,
self.sparse, per_sample_weights)
if self.operation == 'concat':
embed = torch.cat((embed_q, embed_r), dim=1)
elif self.operation == 'add':
embed = embed_q + embed_r
elif self.operation == 'mult':
embed = embed_q * embed_r
return embed
def extra_repr(self):
s = '{num_embeddings}, {embedding_dim}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
s += ', mode={mode}'
return s.format(**self.__dict__)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_categories': 4, 'embedding_dim': 4, 'num_collisions': 4}]
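# --- Illustrative sketch (not from the source repo): how the quotient-
# remainder trick composes two small tables. It assumes plain row lookups
# with the default operation='mult'; the real module reduces bags via
# F.embedding_bag. E.g. with num_categories=16 and num_collisions=4,
# index 11 maps to weight_q[2] * weight_r[3], so 4 + 4 table rows cover
# all 16 categories instead of 16 rows.
def _qr_embed_sketch(indices, weight_q, weight_r, num_collisions=4):
    # indices: LongTensor of raw category ids
    q = weight_q[indices // num_collisions]  # quotient-table rows
    r = weight_r[indices % num_collisions]  # remainder-table rows
    return q * r  # component-wise 'mult' composition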
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
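# Fills the bag-offset tensor with [0, 4, 8, 12]: each fixed-length bag of
# four indices in the flattened (16,) input starts four elements apart.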
@triton.jit
def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 4 * x0
tl.store(out_ptr0 + x0, tmp0, xmask)
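# Fuses input / num_collisions -> int64 (the quotient) with a
# Python-semantics remainder: the float fmod result is shifted into
# [0, 4) whenever it is nonzero and its sign differs from the divisor's.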
@triton.jit
def triton_poi_fused__to_copy_div_remainder_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = tmp2.to(tl.int64)
tmp4 = 4.0
tmp5 = tmp0 % tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = tmp5 != tmp6
tmp8 = libdevice.signbit(tmp5) if tmp5.dtype is tl.float32 else tmp5 < 0
tmp9 = libdevice.signbit(tmp4) if tmp4.dtype is tl.float32 else tmp4 < 0
tmp10 = tmp8 != tmp9
tmp11 = tmp7 & tmp10
tmp12 = tmp5 + tmp4
tmp13 = tl.where(tmp11, tmp12, tmp5)
tmp14 = tmp13.to(tl.int64)
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
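# Component-wise product of the quotient and remainder embedding-bag
# outputs, i.e. the operation='mult' composition.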
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused_arange_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused__to_copy_div_remainder_1[grid(16)](primals_1, buf1,
buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
buf2 = torch.ops.aten._embedding_bag.default(primals_2,
reinterpret_tensor(buf1, (16,), (1,), 0), buf0, False, 1)
del primals_2
buf3 = buf2[0]
buf4 = buf2[1]
buf5 = buf2[2]
buf6 = buf2[3]
del buf2
buf8 = torch.ops.aten._embedding_bag.default(primals_3,
reinterpret_tensor(buf7, (16,), (1,), 0), buf0, False, 1)
del primals_3
buf9 = buf8[0]
buf10 = buf8[1]
buf11 = buf8[2]
buf12 = buf8[3]
del buf8
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_2[grid(16)](buf3, buf9, buf13, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf13, buf0, reinterpret_tensor(buf1, (16,), (1,), 0
), buf3, buf4, buf5, buf6, reinterpret_tensor(buf7, (16,), (1,), 0
), buf9, buf10, buf11, buf12
class QREmbeddingBagNew(nn.Module):
"""Computes sums or means over two 'bags' of embeddings, one using the quotient
of the indices and the other using the remainder of the indices, without
instantiating the intermediate embeddings, then performs an operation to combine these.
For bags of constant length and no :attr:`per_sample_weights`, this class
* with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=0)``,
* with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=0)``,
* with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=0)``.
However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these
operations.
QREmbeddingBag also supports per-sample weights as an argument to the forward
pass. This scales the output of the Embedding before performing a weighted
    reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the
only supported ``mode`` is ``"sum"``, which computes a weighted sum according to
:attr:`per_sample_weights`.
Known Issues:
Autograd breaks with multiple GPUs. It breaks only with multiple embeddings.
Args:
num_categories (int): total number of unique categories. The input indices must be in
0, 1, ..., num_categories - 1.
embedding_dim (list): list of sizes for each embedding vector in each table. If ``"add"``
or ``"mult"`` operation are used, these embedding dimensions must be
the same. If a single embedding_dim is used, then it will use this
embedding_dim for both embedding tables.
num_collisions (int): number of collisions to enforce.
        operation (string, optional): ``"concat"``, ``"add"``, or ``"mult"``. Specifies the operation
to compose embeddings. ``"concat"`` concatenates the embeddings,
``"add"`` sums the embeddings, and ``"mult"`` multiplies
(component-wise) the embeddings.
Default: ``"mult"``
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
is renormalized to have norm :attr:`max_norm`.
norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the inverse of frequency of
the words in the mini-batch. Default ``False``.
Note: this option is not supported when ``mode="max"``.
mode (string, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights`
into consideration. ``"mean"`` computes the average of the values
in the bag, ``"max"`` computes the max value over each bag.
Default: ``"mean"``
sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See
Notes for more details regarding sparse gradients. Note: this option is not
supported when ``mode="max"``.
Attributes:
        weight (Tensor): the learnable weights of each embedding table of the module, of shape
`(num_embeddings, embedding_dim)` initialized using a uniform distribution
with sqrt(1 / num_categories).
Inputs: :attr:`input` (LongTensor), :attr:`offsets` (LongTensor, optional), and
        :attr:`per_sample_weights` (Tensor, optional)
- If :attr:`input` is 2D of shape `(B, N)`,
it will be treated as ``B`` bags (sequences) each of fixed length ``N``, and
this will return ``B`` values aggregated in a way depending on the :attr:`mode`.
:attr:`offsets` is ignored and required to be ``None`` in this case.
- If :attr:`input` is 1D of shape `(N)`,
it will be treated as a concatenation of multiple bags (sequences).
:attr:`offsets` is required to be a 1D tensor containing the
starting index positions of each bag in :attr:`input`. Therefore,
for :attr:`offsets` of shape `(B)`, :attr:`input` will be viewed as
having ``B`` bags. Empty bags (i.e., having 0-length) will have
          their returned vectors filled with zeros.
per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights`
must have exactly the same shape as input and is treated as having the same
:attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``.
Output shape: `(B, embedding_dim)`
"""
__constants__ = ['num_categories', 'embedding_dim', 'num_collisions',
'operation', 'max_norm', 'norm_type', 'scale_grad_by_freq', 'mode',
'sparse']
def __init__(self, num_categories, embedding_dim, num_collisions,
operation='mult', max_norm=None, norm_type=2.0, scale_grad_by_freq=
False, mode='mean', sparse=False, _weight=None):
super(QREmbeddingBagNew, self).__init__()
assert operation in ['concat', 'mult', 'add'], 'Not valid operation!'
self.num_categories = num_categories
if isinstance(embedding_dim, int) or len(embedding_dim) == 1:
self.embedding_dim = [embedding_dim, embedding_dim]
else:
self.embedding_dim = embedding_dim
self.num_collisions = num_collisions
self.operation = operation
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
if self.operation == 'add' or self.operation == 'mult':
assert self.embedding_dim[0] == self.embedding_dim[1
], 'Embedding dimensions do not match!'
self.num_embeddings = [int(np.ceil(num_categories / num_collisions)
), num_collisions]
if _weight is None:
self.weight_q = Parameter(torch.Tensor(self.num_embeddings[0],
self.embedding_dim[0]))
self.weight_r = Parameter(torch.Tensor(self.num_embeddings[1],
self.embedding_dim[1]))
self.reset_parameters()
else:
assert list(_weight[0].shape) == [self.num_embeddings[0], self.
embedding_dim[0]
], 'Shape of weight for quotient table does not match num_embeddings and embedding_dim'
assert list(_weight[1].shape) == [self.num_embeddings[1], self.
embedding_dim[1]
], 'Shape of weight for remainder table does not match num_embeddings and embedding_dim'
self.weight_q = Parameter(_weight[0])
self.weight_r = Parameter(_weight[1])
self.mode = mode
self.sparse = sparse
def reset_parameters(self):
nn.init.uniform_(self.weight_q, np.sqrt(1 / self.num_categories))
nn.init.uniform_(self.weight_r, np.sqrt(1 / self.num_categories))
def extra_repr(self):
s = '{num_embeddings}, {embedding_dim}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
s += ', mode={mode}'
return s.format(**self.__dict__)
def forward(self, input_0):
primals_2 = self.weight_q
primals_1 = self.weight_r
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| STAR-Laboratory/Accelerating-RecSys-Training | QREmbeddingBag | false | 17,890 | [
"MIT"
] | 5 | e43cae6fd543813b352b01510e846febd67944ad | https://github.com/STAR-Laboratory/Accelerating-RecSys-Training/tree/e43cae6fd543813b352b01510e846febd67944ad |
Classifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ue/cuedzfesd5tbvovnfhdngxg7o4noqv7bql6j7gciutdvmczu6mei.py
# Topologically Sorted Source Nodes: [sum_1, v_mean], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# sum_1 => sum_1
# v_mean => div
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [1]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 4), kwargs = {})
triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/us/cusfvolkrms3o4cpizbeuydje3mvjjkksbsmaatgdkv24azmdyug.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %primals_6), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, v_mean], Original ATen: [aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sum_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_2, q_mean], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_0.run(primals_2, buf1, 64, grid=grid(64), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out], Original ATen: [aten._trilinear]
buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), primals_5, reinterpret_tensor(buf1, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_5
buf3 = buf2
del buf2
buf4 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf4, primals_6, 64, grid=grid(64), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_8
return (reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (16, 4), (4, 1), 0), primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class Classifier(nn.Sequential):
def __init__(self, in_features, mid_features, out_features, drop=0.0):
super(Classifier, self).__init__()
self.lin1 = FCNet(in_features, mid_features, activate='relu', drop=drop
)
self.lin2 = FCNet(mid_features, out_features, drop=drop)
self.bilinear = nn.Bilinear(in1_features=in_features, in2_features=
in_features, out_features=mid_features)
def forward(self, v, q):
"""
:param v: [batch, r1, features]
:param q: [batch, r2, features]
        :return: [batch, out_features] scores from the bilinear fusion of the mean-pooled inputs
"""
num_obj = v.shape[2]
max_len = q.shape[2]
v_mean = v.sum(1) / num_obj
q_mean = q.sum(1) / max_len
out = self.lin1(v_mean * q_mean)
out = self.bilinear(v_mean, q_mean)
out = self.lin2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'mid_features': 4, 'out_features': 4}]
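# --- Illustrative sketch (not from the source repo) of the path the fused
# kernels actually compute: mean-pool over dim 1, bilinear fusion, then the
# final linear layer. The reference forward divides by shape[2], which
# coincides with shape[1] for the 4x4x4x4 test inputs, and its lin1 output
# is overwritten by the bilinear output, so lin1 is dead code in the
# compiled graph.
def _classifier_sketch(v, q, model):
    v_mean = v.sum(1) / v.shape[1]
    q_mean = q.sum(1) / q.shape[1]
    return model.lin2(model.bilinear(v_mean, q_mean))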
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
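# Mean over dim 1 of a (4, 4, 4, 4) tensor: sums the four stride-16 slices
# of each batch item and scales by 1/4.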
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
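# In-place bias add for the bilinear output, broadcasting the length-4
# bias over the flattened (64,) activations.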
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sum_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_sum_0[grid(64)](primals_2, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, (
16, 4), (4, 1), 0), primals_5, reinterpret_tensor(buf1, (16, 4),
(4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_5
buf3 = buf2
del buf2
buf4 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
del buf3
triton_poi_fused_add_1[grid(64)](buf4, primals_6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf4, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_8
return reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(
buf1, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (16, 4), (4, 1), 0
), primals_7
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class ClassifierNew(nn.Sequential):
def __init__(self, in_features, mid_features, out_features, drop=0.0):
super(ClassifierNew, self).__init__()
self.lin1 = FCNet(in_features, mid_features, activate='relu', drop=drop
)
self.lin2 = FCNet(mid_features, out_features, drop=drop)
self.bilinear = nn.Bilinear(in1_features=in_features, in2_features=
in_features, out_features=mid_features)
def forward(self, input_0, input_1):
primals_3 = self.lin1.lin.weight
primals_4 = self.lin1.lin.bias
primals_7 = self.lin2.lin.weight
primals_6 = self.lin2.lin.bias
primals_5 = self.bilinear.weight
primals_8 = self.bilinear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| Ruiver/CTCNet | Classifier | false | 17,891 | [
"Apache-2.0"
] | 6 | 539e55ec9fed06028379d35dfd5cd4074755ffd8 | https://github.com/Ruiver/CTCNet/tree/539e55ec9fed06028379d35dfd5cd4074755ffd8 |
MLPPolicyNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/2e/c2efxa4kcqntmvfjybxflawnkzzycjadesronza2omsjv3umgkpk.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 89600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 1400
x1 = (xindex // 1400)
tmp0 = tl.load(in_out_ptr0 + (x0 + (1408*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0 + (1408*x1)), tmp4, xmask)
tl.store(out_ptr0 + (x0 + (1408*x1)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/hq/chqfzhclcz3acjs3texzpyy3tcbfrwmmkspemm2qyakonop6zlo5.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/wp/cwp5hhivoyun3bsorabdwkonnw35ctbvxutky5ehzuy3jzuzmdtk.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ka/ckaenykasyue5shc6m6uuk44xjltqr2kho5gbrvfrenknjzdh363.py
# Topologically Sorted Source Nodes: [log_std_1], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and]
# Source node to ATen node mapping:
# log_std_1 => clamp_max, clamp_min
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_9, -20), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 2), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_9, -20), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_9, 2), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {})
triton_poi_fused_clamp_ge_le_logical_and_3 = async_compile.triton('triton_poi_fused_clamp_ge_le_logical_and_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_le_logical_and_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -20.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 2.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp2 >= tmp3
tmp8 = tmp2 <= tmp5
tmp9 = tmp7 & tmp8
tl.store(out_ptr0 + (x2), tmp6, xmask)
tl.store(out_ptr1 + (x2), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (1400, 4), (4, 1))
assert_size_stride(primals_2, (1400, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1024, 1400), (1400, 1))
assert_size_stride(primals_5, (1024, ), (1, ))
assert_size_stride(primals_6, (256, 1024), (1024, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (4, 256), (256, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 256), (256, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1400), (1408, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1400), (22528, 5632, 1408, 1), 0); del buf0 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 1400), (22528, 5632, 1408, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf12, 89600, grid=grid(89600), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 1400), (1408, 1), 0), reinterpret_tensor(primals_4, (1400, 1024), (1, 1400), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1024), (16384, 4096, 1024, 1), 0); del buf2 # reuse
buf11 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf11, 65536, grid=grid(65536), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0), reinterpret_tensor(primals_6, (1024, 256), (1, 1024), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf4 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf5, primals_7, buf10, 16384, grid=grid(16384), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 256), (256, 1), 0), reinterpret_tensor(primals_8, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (64, 256), (256, 1), 0), reinterpret_tensor(primals_10, (256, 4), (1, 256), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [log_std_1], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and]
triton_poi_fused_clamp_ge_le_logical_and_3.run(buf7, primals_11, buf8, buf9, 256, grid=grid(256), stream=stream0)
del buf7
del primals_11
return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 1400), (1408, 1), 0), reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0), reinterpret_tensor(buf5, (64, 256), (256, 1), 0), buf9, primals_10, primals_8, buf10, primals_6, buf11, primals_4, buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1400, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1024, 1400), (1400, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
class MLPPolicyNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size1=1400,
hidden_size2=1024, hidden_size3=256, init_w=0.003, log_std_min=-20,
log_std_max=2):
super(MLPPolicyNetwork, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.num_inputs = num_inputs
self.num_actions = num_actions
self.linear1 = nn.Linear(num_inputs, hidden_size1)
self.linear2 = nn.Linear(hidden_size1, hidden_size2)
self.linear3 = nn.Linear(hidden_size2, hidden_size3)
self.mean_linear = nn.Linear(hidden_size3, num_actions)
self.mean_linear.weight.data.uniform_(-init_w, init_w)
self.mean_linear.bias.data.uniform_(-init_w, init_w)
self.log_std_linear = nn.Linear(hidden_size3, num_actions)
self.log_std_linear.weight.data.uniform_(-init_w, init_w)
self.log_std_linear.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
x = F.relu(self.linear3(x))
mean = self.mean_linear(x)
log_std = self.log_std_linear(x)
log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
return mean, log_std
def sample(self, state, scale, epsilon=1e-06):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.rsample()
action = torch.tanh(z)
        # change-of-variables correction for the tanh squashing; epsilon
        # keeps the log finite as |action| approaches 1
        log_pi = normal.log_prob(z) - torch.log(scale * (1 - action.pow(2)) +
            epsilon)
log_pi = log_pi.sum(1, keepdim=True)
return action, log_pi, mean, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_actions': 4}]
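def example_usage():
    # Illustrative sketch (not in the original file): runs the eager-mode
    # policy end to end with the test configuration from get_init_inputs.
    net = MLPPolicyNetwork(num_inputs=4, num_actions=4)
    state = torch.rand(4, 4, 4, 4)
    mean, log_std = net(state)  # log_std is clamped to [-20, 2]
    action, log_pi, _, _ = net.sample(state, scale=1.0)
    return action.shape, log_pi.shape  # (4, 4, 4, 4) and (4, 1, 4, 4)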
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.distributions import Normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 89600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 1400
x1 = xindex // 1400
tmp0 = tl.load(in_out_ptr0 + (x0 + 1408 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0 + 1408 * x1), tmp4, xmask)
tl.store(out_ptr0 + (x0 + 1408 * x1), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # all-true mask from codegen; the 65536-element grid needs no bounds check
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # all-true mask from codegen; the 16384-element grid needs no bounds check
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -20.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 2.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp2 >= tmp3
tmp8 = tmp2 <= tmp5
tmp9 = tmp7 & tmp8
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp9, xmask)
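def _clamp_reference(x, bias):
    # Hypothetical eager-mode mirror (not part of the generated module) of
    # triton_poi_fused_clamp_ge_le_logical_and_3: it returns the clamped
    # log_std and the in-range mask Inductor saves for clamp's backward.
    y = x + bias
    return y.clamp(-20.0, 2.0), (y >= -20.0) & (y <= 2.0)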
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (1400, 4), (4, 1))
assert_size_stride(primals_2, (1400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1024, 1400), (1400, 1))
assert_size_stride(primals_5, (1024,), (1,))
assert_size_stride(primals_6, (256, 1024), (1024, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (4, 256), (256, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 256), (256, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1400), (1408, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1400), (22528, 5632, 1408,
1), 0)
del buf0
buf12 = empty_strided_cuda((4, 4, 4, 1400), (22528, 5632, 1408, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(89600)](buf1,
primals_2, buf12, 89600, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 1400), (1408, 1), 0
), reinterpret_tensor(primals_4, (1400, 1024), (1, 1400), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1024), (16384, 4096, 1024,
1), 0)
del buf2
buf11 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(65536)](buf3,
primals_5, buf11, 65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0
), reinterpret_tensor(primals_6, (1024, 256), (1, 1024), 0),
out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf4
buf10 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(16384)](buf5,
primals_7, buf10, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_8, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf6)
del primals_9
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_10, (256, 4), (1, 256), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_clamp_ge_le_logical_and_3[grid(256)](buf7,
primals_11, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
del primals_11
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 1400), (1408, 1), 0
), reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0
), reinterpret_tensor(buf5, (64, 256), (256, 1), 0
), buf9, primals_10, primals_8, buf10, primals_6, buf11, primals_4, buf12
class MLPPolicyNetworkNew(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size1=1400,
hidden_size2=1024, hidden_size3=256, init_w=0.003, log_std_min=-20,
log_std_max=2):
super(MLPPolicyNetworkNew, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.num_inputs = num_inputs
self.num_actions = num_actions
self.linear1 = nn.Linear(num_inputs, hidden_size1)
self.linear2 = nn.Linear(hidden_size1, hidden_size2)
self.linear3 = nn.Linear(hidden_size2, hidden_size3)
self.mean_linear = nn.Linear(hidden_size3, num_actions)
self.mean_linear.weight.data.uniform_(-init_w, init_w)
self.mean_linear.bias.data.uniform_(-init_w, init_w)
self.log_std_linear = nn.Linear(hidden_size3, num_actions)
self.log_std_linear.weight.data.uniform_(-init_w, init_w)
self.log_std_linear.bias.data.uniform_(-init_w, init_w)
def sample(self, state, scale, epsilon=1e-06):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.rsample()
action = torch.tanh(z)
log_pi = normal.log_prob(z) - torch.log(scale * (1 - action.pow(2)) +
epsilon)
log_pi = log_pi.sum(1, keepdim=True)
return action, log_pi, mean, std
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_8 = self.mean_linear.weight
primals_9 = self.mean_linear.bias
primals_10 = self.log_std_linear.weight
primals_11 = self.log_std_linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
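def example_compiled_usage():
    # Illustrative sketch (not in the original file); a CUDA device is
    # required because `call` launches the Triton kernels above.
    net = MLPPolicyNetworkNew(num_inputs=4, num_actions=4).cuda()
    state = torch.rand(4, 4, 4, 4, device='cuda')
    mean, log_std = net(state)  # the wrapper returns only (mean, log_std)
    return mean.shape, log_std.shape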
| SAMMiCA/DL_based_E2E_Driving | MLPPolicyNetwork | false | 17,892 | [
"MIT"
] | 4 | 01f7d74a0db7ed745cf27b9a1ebab0246015ecbd | https://github.com/SAMMiCA/DL_based_E2E_Driving/tree/01f7d74a0db7ed745cf27b9a1ebab0246015ecbd |
HardWeightedSum | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/qx/cqxz2e3zqo42e7c2djljfyw3drwgrux4rkof7irihkhyju5qyxjm.py
# Topologically Sorted Source Nodes: [weights_num, sum_1], Original ATen: [aten.relu, aten.sum]
# Source node to ATen node mapping:
# sum_1 => sum_1
# weights_num => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%relu,), kwargs = {})
triton_per_fused_relu_sum_0 = async_compile.triton('triton_per_fused_relu_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 2],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_relu_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_relu_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 2
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/if/cifmjw6zeofqorcz3d33izk4faz4upg7oslr62tb2ebmgay6dhqy.py
# Topologically Sorted Source Nodes: [weights_num, weights_denom, mul, truediv, sum_2], Original ATen: [aten.relu, aten.add, aten.mul, aten.div, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# sum_2 => sum_2
# truediv => div
# weights_denom => add
# weights_num => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 0.0001), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %primals_2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div, [0]), kwargs = {})
triton_poi_fused_add_div_mul_relu_sum_1 = async_compile.triton('triton_poi_fused_add_div_mul_relu_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_relu_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_relu_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp4 = tl.load(in_ptr1 + (x0), xmask)
tmp6 = tl.load(in_ptr2 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (1))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp2 = tl.full([1], 0, tl.int32)
tmp3 = triton_helpers.maximum(tmp2, tmp1)
tmp5 = tmp3 * tmp4
tmp8 = 0.0001
tmp9 = tmp7 + tmp8
tmp10 = tmp5 / tmp9
tmp13 = triton_helpers.maximum(tmp2, tmp12)
tmp14 = tmp13 * tmp4
tmp15 = tmp14 / tmp9
tmp16 = tmp10 + tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (2, 1, 1, 1, 1), (1, 1, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [weights_num, sum_1], Original ATen: [aten.relu, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_relu_sum_0.run(primals_1, buf0, 1, 2, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weights_num, weights_denom, mul, truediv, sum_2], Original ATen: [aten.relu, aten.add, aten.mul, aten.div, aten.sum]
triton_poi_fused_add_div_mul_relu_sum_1.run(primals_1, primals_2, buf0, buf1, 256, grid=grid(256), stream=stream0)
del buf0
return (buf1, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((2, 1, 1, 1, 1), (1, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class HardWeightedSum(nn.Module):
def __init__(self, op_number=2, act=nn.ReLU, eps=0.0001):
super(HardWeightedSum, self).__init__()
shape = op_number, 1, 1, 1, 1
self.weights = nn.Parameter(torch.ones(shape), requires_grad=True)
self.act = act()
self.eps = eps
    def forward(self, x):
        weights_num = self.act(self.weights)  # ReLU keeps the mixing weights non-negative
        weights_denom = torch.sum(weights_num) + self.eps  # eps guards against a zero denominator
        return torch.sum(weights_num * x / weights_denom, dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
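def example_weighted_sum():
    # Worked example (added for illustration): with the default op_number=2
    # and unit weights, the module averages the stacked operands up to the
    # eps term, so values 1 and 3 blend to roughly 2.0.
    m = HardWeightedSum()
    x = torch.stack([torch.ones(1, 1, 1, 1), 3 * torch.ones(1, 1, 1, 1)])
    return m(x)  # (1*1 + 1*3) / (2 + 1e-4) ~= 2.0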
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_relu_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]  # discarded index expression kept from codegen
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # all-true xmask; xnumel is 1
rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # all-true rmask; rnumel == RBLOCK == 2
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
@triton.jit
def triton_poi_fused_add_div_mul_relu_sum_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp11 = tl.load(in_ptr0 + 1)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp2 = tl.full([1], 0, tl.int32)
tmp3 = triton_helpers.maximum(tmp2, tmp1)
tmp5 = tmp3 * tmp4
tmp8 = 0.0001
tmp9 = tmp7 + tmp8
tmp10 = tmp5 / tmp9
tmp13 = triton_helpers.maximum(tmp2, tmp12)
tmp14 = tmp13 * tmp4
tmp15 = tmp14 / tmp9
tmp16 = tmp10 + tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
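def _weighted_sum_reference(w, x, s):
    # Eager-mode mirror (illustrative, not part of the generated module) of
    # the kernel above: the dim-0 sum over op_number=2 operands is unrolled,
    # so each output element combines both ReLU'd weights with the same
    # input value; s is the precomputed scalar sum of the ReLU'd weights.
    denom = s + 0.0001
    return torch.relu(w[0]) * x / denom + torch.relu(w[1]) * x / denom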
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (2, 1, 1, 1, 1), (1, 1, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_relu_sum_0[grid(1)](primals_1, buf0, 1, 2, XBLOCK=
1, num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_relu_sum_1[grid(256)](primals_1,
primals_2, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
return buf1, primals_1, primals_2
class HardWeightedSumNew(nn.Module):
def __init__(self, op_number=2, act=nn.ReLU, eps=0.0001):
super(HardWeightedSumNew, self).__init__()
shape = op_number, 1, 1, 1, 1
self.weights = nn.Parameter(torch.ones(shape), requires_grad=True)
self.act = act()
self.eps = eps
def forward(self, input_0):
primals_1 = self.weights
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
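def example_compiled_weighted_sum():
    # Illustrative sketch (CUDA required): `call` asserts a contiguous
    # (4, 4, 4, 4) input, so any other shape fails the stride guard.
    m = HardWeightedSumNew().cuda()
    return m(torch.rand(4, 4, 4, 4, device='cuda'))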
| Senyaaa/detection-experiments | HardWeightedSum | false | 17,893 | [
"Apache-2.0"
] | 5 | 5e80dd458e886ca27db5420d25ade8f9d74ae5a8 | https://github.com/Senyaaa/detection-experiments/tree/5e80dd458e886ca27db5420d25ade8f9d74ae5a8 |
DecoderBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/n3/cn3jjpdisicaaigx6zzv56bnajat5bfugqbufbm54r3v5s7rlpxq.py
# Topologically Sorted Source Nodes: [conv3d, instance_norm, x], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu]
# Source node to ATen node mapping:
# conv3d => convolution
# instance_norm => add, rsqrt, var_mean
# x => gt, mul_1, where
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1, 1], [4, 4, 4], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3, 4]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
xnumel = 16
XBLOCK: tl.constexpr = 1
rnumel = 729
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = rindex < rnumel
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (729*x3)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.where(rmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [RBLOCK])
tmp8 = tl.where(rmask, tmp6, 0)
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp10 = tl.full([1], 729, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = tl.where(rmask, tmp15, 0)
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp19 = tmp2 - tmp12
tmp20 = 729.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(in_out_ptr0 + (r2 + (729*x3)), tmp2, rmask)
tl.store(out_ptr2 + (r2 + (729*x3)), tmp30, rmask)
tl.store(out_ptr3 + (x3), tmp24, None)
tl.store(out_ptr0 + (x3), tmp12, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/3n/c3nn3ch7c5jgfcy3gwn6li4szfn6r73hcj5gif5otzvenr3cbc7f.py
# Topologically Sorted Source Nodes: [conv3d_1, instance_norm_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# conv3d_1 => convolution_1
# instance_norm_1 => add_1, rsqrt_1, var_mean_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%view_3, %primals_4, %primals_5, [1, 1, 1], [4, 4, 4], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3, 4]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
triton_red_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_red_fused__native_batch_norm_legit_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[16, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 16
rnumel = 2744
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 4
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + (2744*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce(
tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + (2744*x3)), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(
tmp4_mean, tmp4_m2, tmp4_weight, 1
)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tl.store(out_ptr0 + (x3), tmp4, xmask)
tmp7 = 2744.0
tmp8 = tmp5 / tmp7
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/v7/cv7pjgqie5mtr6flfweflblda3jbc6ozb7wueegqnsvpd6qoxq23.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_2 => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=7] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_10, torch.int64), kwargs = {})
triton_poi_fused__to_copy_2 = async_compile.triton('triton_poi_fused__to_copy_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_2(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/zu/czuwlhm6phh4sjppupp6lfvqy3j32cibv63jmzdakmuphdbv5seb.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_2 => add_3, clamp_max
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=5] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_3, 13), kwargs = {})
triton_poi_fused_add_clamp_3 = async_compile.triton('triton_poi_fused_add_clamp_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_3(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 13, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/jq/cjqb6m3yjqrq7gwoc6wtgintkzzvfrw3edlrtybutlc775uaqfpa.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_2 => add_2, clamp_max_3, clamp_min, clamp_min_3, convert_element_type, iota, mul_4, sub_2, sub_5
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (28,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.5), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, 0.5), kwargs = {})
# %clamp_min : [num_users=4] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_5), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=5] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
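# The three index/weight kernels above precompute the trilinear resampling
# grid for the 2x upsample: a source coordinate (i + 0.5) * 0.5 - 0.5
# clamped at 0, its floor as the low index, the low index + 1 capped at 13
# (the last row of the 14-wide input) as the high index, and the fractional
# part clamped to [0, 1] as the blend weight, reused for all three spatial
# axes of the 28^3 output.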
# kernel path: runs/run_shard_2/inductor_cache/lv/clvlqcszpu6qnkejl4hwttilnw5zh6pfixhbx7b56i6us55wfsym.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# x_2 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_10, add_11, add_12, add_13, add_14, add_8, add_9, mul_10, mul_11, mul_12, mul_13, mul_7, mul_8, mul_9, sub_11, sub_12, sub_14, sub_6, sub_7, sub_8, sub_9
# Graph fragment:
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_8, [None, None, %convert_element_type_1, %convert_element_type_3, %convert_element_type_5]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_8, [None, None, %convert_element_type_1, %convert_element_type_3, %clamp_max_2]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_8, [None, None, %convert_element_type_1, %clamp_max_1, %convert_element_type_5]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_8, [None, None, %convert_element_type_1, %clamp_max_1, %clamp_max_2]), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_8, [None, None, %clamp_max, %convert_element_type_3, %convert_element_type_5]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_8, [None, None, %clamp_max, %convert_element_type_3, %clamp_max_2]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_8, [None, None, %clamp_max, %clamp_max_1, %convert_element_type_5]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_8, [None, None, %clamp_max, %clamp_max_1, %clamp_max_2]), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_7), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %clamp_max_3), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_8), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %clamp_max_3), kwargs = {})
# %add_10 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_9), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %clamp_max_3), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_10), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_9, %add_8), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_4), kwargs = {})
# %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %mul_11), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_11, %add_10), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %clamp_max_4), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %mul_12), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_13, %add_12), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %clamp_max_5), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_12, %mul_13), kwargs = {})
triton_poi_fused__unsafe_index_add_mul_sub_5 = async_compile.triton('triton_poi_fused__unsafe_index_add_mul_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i64', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*i64', 8: '*i64', 9: '*i64', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_mul_sub_5', 'mutated_arg_names': ['in_out_ptr2'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_mul_sub_5(in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, xnumel, XBLOCK : tl.constexpr):
xnumel = 351232
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 784) % 28
x1 = (xindex // 28) % 28
x0 = xindex % 28
x3 = (xindex // 21952)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x3), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x3), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr6 + (x2), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr7 + (x1), xmask, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr8 + (x0), xmask, eviction_policy='evict_last')
tmp60 = tl.load(in_ptr9 + (x0), xmask, eviction_policy='evict_last')
tmp92 = tl.load(in_ptr10 + (x1), xmask, eviction_policy='evict_last')
tmp98 = tl.load(in_ptr11 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 14, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp10 = tmp9 + tmp1
tmp11 = tmp9 < 0
tmp12 = tl.where(tmp11, tmp10, tmp9)
tmp13 = tl.load(in_ptr3 + (tmp12 + (14*tmp8) + (196*tmp4) + (2744*x3)), xmask, eviction_policy='evict_last')
tmp15 = tmp13 - tmp14
tmp17 = tmp15 * tmp16
tmp18 = 0.0
tmp19 = tmp17 > tmp18
tmp20 = 0.01
tmp21 = tmp17 * tmp20
tmp22 = tl.where(tmp19, tmp17, tmp21)
tmp24 = tmp23 + tmp1
tmp25 = tmp23 < 0
tmp26 = tl.where(tmp25, tmp24, tmp23)
tmp27 = tl.load(in_ptr3 + (tmp12 + (14*tmp8) + (196*tmp26) + (2744*x3)), xmask, eviction_policy='evict_last')
tmp28 = tmp27 - tmp14
tmp29 = tmp28 * tmp16
tmp30 = tmp29 > tmp18
tmp31 = tmp29 * tmp20
tmp32 = tl.where(tmp30, tmp29, tmp31)
tmp34 = tmp33 + tmp1
tmp35 = tmp33 < 0
tmp36 = tl.where(tmp35, tmp34, tmp33)
tmp37 = tl.load(in_ptr3 + (tmp12 + (14*tmp36) + (196*tmp4) + (2744*x3)), xmask, eviction_policy='evict_last')
tmp38 = tmp37 - tmp14
tmp39 = tmp38 * tmp16
tmp40 = tmp39 > tmp18
tmp41 = tmp39 * tmp20
tmp42 = tl.where(tmp40, tmp39, tmp41)
tmp43 = tl.load(in_ptr3 + (tmp12 + (14*tmp36) + (196*tmp26) + (2744*x3)), xmask, eviction_policy='evict_last')
tmp44 = tmp43 - tmp14
tmp45 = tmp44 * tmp16
tmp46 = tmp45 > tmp18
tmp47 = tmp45 * tmp20
tmp48 = tl.where(tmp46, tmp45, tmp47)
tmp50 = tmp49 + tmp1
tmp51 = tmp49 < 0
tmp52 = tl.where(tmp51, tmp50, tmp49)
tmp53 = tl.load(in_ptr3 + (tmp52 + (14*tmp36) + (196*tmp26) + (2744*x3)), xmask, eviction_policy='evict_last')
tmp54 = tmp53 - tmp14
tmp55 = tmp54 * tmp16
tmp56 = tmp55 > tmp18
tmp57 = tmp55 * tmp20
tmp58 = tl.where(tmp56, tmp55, tmp57)
tmp59 = tmp58 - tmp48
tmp61 = tmp59 * tmp60
tmp62 = tmp48 + tmp61
tmp63 = tl.load(in_ptr3 + (tmp52 + (14*tmp36) + (196*tmp4) + (2744*x3)), xmask, eviction_policy='evict_last')
tmp64 = tmp63 - tmp14
tmp65 = tmp64 * tmp16
tmp66 = tmp65 > tmp18
tmp67 = tmp65 * tmp20
tmp68 = tl.where(tmp66, tmp65, tmp67)
tmp69 = tmp68 - tmp42
tmp70 = tmp69 * tmp60
tmp71 = tmp42 + tmp70
tmp72 = tl.load(in_ptr3 + (tmp52 + (14*tmp8) + (196*tmp4) + (2744*x3)), xmask, eviction_policy='evict_last')
tmp73 = tmp72 - tmp14
tmp74 = tmp73 * tmp16
tmp75 = tmp74 > tmp18
tmp76 = tmp74 * tmp20
tmp77 = tl.where(tmp75, tmp74, tmp76)
tmp78 = tmp77 - tmp22
tmp79 = tmp78 * tmp60
tmp80 = tmp22 + tmp79
tmp81 = tmp80 - tmp71
tmp82 = tl.load(in_ptr3 + (tmp52 + (14*tmp8) + (196*tmp26) + (2744*x3)), xmask, eviction_policy='evict_last')
tmp83 = tmp82 - tmp14
tmp84 = tmp83 * tmp16
tmp85 = tmp84 > tmp18
tmp86 = tmp84 * tmp20
tmp87 = tl.where(tmp85, tmp84, tmp86)
tmp88 = tmp87 - tmp32
tmp89 = tmp88 * tmp60
tmp90 = tmp32 + tmp89
tmp91 = tmp90 - tmp62
tmp93 = tmp81 * tmp92
tmp94 = tmp71 + tmp93
tmp95 = tmp91 * tmp92
tmp96 = tmp62 + tmp95
tmp97 = tmp96 - tmp94
tmp99 = tmp97 * tmp98
tmp100 = tmp94 + tmp99
tl.store(in_out_ptr2 + (x5), tmp100, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1, 1), padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9, 9), (2916, 729, 81, 9, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 9, 9, 9), (2916, 729, 81, 9, 1), torch.float32)
buf5 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [conv3d, instance_norm, x], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_0.run(buf1, primals_2, buf2, buf6, buf5, 16, 729, grid=grid(16), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv3d_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_4, stride=(1, 1, 1), padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 14, 14, 14), (10976, 2744, 196, 14, 1))
buf8 = buf7; del buf7 # reuse
buf9 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 1, 1, 1), torch.float32)
buf10 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16), torch.float32)
buf12 = reinterpret_tensor(buf10, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [conv3d_1, instance_norm_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_convolution_1.run(buf8, buf12, primals_5, buf9, 16, 2744, grid=grid(16), stream=stream0)
del primals_5
buf13 = empty_strided_cuda((28, 1, 1), (1, 1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_2.run(buf13, 28, grid=grid(28), stream=stream0)
buf14 = empty_strided_cuda((28, 1, 1), (1, 1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_3.run(buf14, 28, grid=grid(28), stream=stream0)
buf15 = empty_strided_cuda((28, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_2.run(buf15, 28, grid=grid(28), stream=stream0)
buf16 = empty_strided_cuda((28, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_3.run(buf16, 28, grid=grid(28), stream=stream0)
buf17 = empty_strided_cuda((28, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_2.run(buf17, 28, grid=grid(28), stream=stream0)
buf18 = empty_strided_cuda((28, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_3.run(buf18, 28, grid=grid(28), stream=stream0)
buf23 = empty_strided_cuda((28, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4.run(buf23, 28, grid=grid(28), stream=stream0)
buf26 = empty_strided_cuda((28, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4.run(buf26, 28, grid=grid(28), stream=stream0)
buf29 = empty_strided_cuda((28, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4.run(buf29, 28, grid=grid(28), stream=stream0)
buf20 = empty_strided_cuda((4, 4, 28, 28, 28), (87808, 21952, 784, 28, 1), torch.float32)
buf27 = buf20; del buf20 # reuse
buf30 = buf27; del buf27 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_mul_sub_5.run(buf30, buf13, buf16, buf17, buf8, buf9, buf12, buf14, buf15, buf18, buf23, buf26, buf29, 351232, grid=grid(351232), stream=stream0)
return (buf30, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf5, (16, ), (1, ), 0), buf6, buf8, buf9, buf12, buf13, buf14, buf15, buf16, buf17, buf18, buf23, buf26, buf29, reinterpret_tensor(buf2, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from functools import partial
import torch.nn.functional as F
from torch import nn
class DecoderBlock(nn.Module):
"""
Decoder block class
"""
def __init__(self, in_channels, middle_channels, out_channels, k_size,
pad_size):
super(DecoderBlock, self).__init__()
self.conv1 = nn.Conv3d(in_channels, middle_channels, kernel_size=
k_size, padding=pad_size)
self.conv2 = nn.Conv3d(middle_channels, out_channels, kernel_size=
k_size, padding=pad_size)
        # note: IN1 sees conv1's middle_channels output but is built with
        # out_channels; harmless here since the default affine=False
        # InstanceNorm3d ignores num_features at runtime
        self.IN1 = nn.InstanceNorm3d(out_channels)
        self.IN2 = nn.InstanceNorm3d(out_channels)
self.upsample = partial(F.interpolate, scale_factor=2, mode=
'trilinear', align_corners=False)
def forward(self, x):
x = F.leaky_relu(self.IN1(self.conv1(x)), inplace=True)
x = F.leaky_relu(self.IN2(self.conv2(x)), inplace=True)
x = self.upsample(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'middle_channels': 4, 'out_channels': 4,
'k_size': 4, 'pad_size': 4}]
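# Hedged usage sketch (added for illustration; not from the original repo):
# with k_size=4 and pad_size=4, conv arithmetic gives 4 -> 9 -> 14 spatial,
# and the x2 trilinear upsample then yields 28.
if __name__ == "__main__":
    _block = DecoderBlock(4, 4, 4, k_size=4, pad_size=4)
    _out = _block(get_inputs()[0])
    assert _out.shape == (4, 4, 28, 28, 28)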
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from functools import partial
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
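# Note (descriptive comment added for readability): kernel 0 fuses the
# conv1 bias-add, the per-(sample, channel) mean/rstd over the 9x9x9 = 729
# voxels (InstanceNorm3d without affine), and leaky_relu with
# negative_slope=0.01 into a single pass.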
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_0(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
rnumel = 729
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 729 * x3), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tl.where(rmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [RBLOCK])
tmp8 = tl.where(rmask, tmp6, 0)
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp10 = tl.full([1], 729, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = tl.where(rmask, tmp15, 0)
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp19 = tmp2 - tmp12
tmp20 = 729.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(in_out_ptr0 + (r2 + 729 * x3), tmp2, rmask)
tl.store(out_ptr2 + (r2 + 729 * x3), tmp30, rmask)
tl.store(out_ptr3 + x3, tmp24, None)
tl.store(out_ptr0 + x3, tmp12, None)
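# Note: kernel 1 repeats the bias-add + instance-norm statistics for the
# 14x14x14 = 2744-voxel conv2 output, looping with a Welford reduction
# since one block cannot hold 2744 elements; only mean and rstd are
# stored, and the normalization itself is folded into kernel 5 below.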
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr,
RBLOCK: tl.constexpr):
xnumel = 16
rnumel = 2744
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 4
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + 2744 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + 2744 * x3), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6_tmp[:, None]
tl.store(out_ptr0 + x3, tmp4, xmask)
tmp7 = 2744.0
tmp8 = tmp5 / tmp7
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp11, xmask)
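# Note: kernels 2-4 precompute the 14 -> 28 trilinear-upsample index
# tables (align_corners=False). For output coordinate x the source
# coordinate is max((x + 0.5) * 0.5 - 0.5, 0); kernel 2 stores its floor,
# kernel 3 the floor + 1 clamped to 13, and kernel 4 the fractional lerp
# weight clamped to [0, 1].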
@triton.jit
def triton_poi_fused__to_copy_2(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 13, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
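# Note: kernel 5 is the heavy fused stage. For each of the
# 4 * 4 * 28**3 = 351232 output voxels it gathers the 8 surrounding
# corners of the conv2 output, applies (x - mean) * rstd and leaky_relu to
# each corner, then lerps along W, H, and D with the weights from
# kernels 2-4.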
@triton.jit
def triton_poi_fused__unsafe_index_add_mul_sub_5(in_out_ptr2, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, in_ptr10, in_ptr11, xnumel, XBLOCK: tl.constexpr):
xnumel = 351232
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 784 % 28
x1 = xindex // 28 % 28
x0 = xindex % 28
x3 = xindex // 21952
x5 = xindex
tmp0 = tl.load(in_ptr0 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x3, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x3, xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr6 + x2, xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last')
tmp60 = tl.load(in_ptr9 + x0, xmask, eviction_policy='evict_last')
tmp92 = tl.load(in_ptr10 + x1, xmask, eviction_policy='evict_last')
tmp98 = tl.load(in_ptr11 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 14, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp10 = tmp9 + tmp1
tmp11 = tmp9 < 0
tmp12 = tl.where(tmp11, tmp10, tmp9)
tmp13 = tl.load(in_ptr3 + (tmp12 + 14 * tmp8 + 196 * tmp4 + 2744 * x3),
xmask, eviction_policy='evict_last')
tmp15 = tmp13 - tmp14
tmp17 = tmp15 * tmp16
tmp18 = 0.0
tmp19 = tmp17 > tmp18
tmp20 = 0.01
tmp21 = tmp17 * tmp20
tmp22 = tl.where(tmp19, tmp17, tmp21)
tmp24 = tmp23 + tmp1
tmp25 = tmp23 < 0
tmp26 = tl.where(tmp25, tmp24, tmp23)
tmp27 = tl.load(in_ptr3 + (tmp12 + 14 * tmp8 + 196 * tmp26 + 2744 * x3),
xmask, eviction_policy='evict_last')
tmp28 = tmp27 - tmp14
tmp29 = tmp28 * tmp16
tmp30 = tmp29 > tmp18
tmp31 = tmp29 * tmp20
tmp32 = tl.where(tmp30, tmp29, tmp31)
tmp34 = tmp33 + tmp1
tmp35 = tmp33 < 0
tmp36 = tl.where(tmp35, tmp34, tmp33)
tmp37 = tl.load(in_ptr3 + (tmp12 + 14 * tmp36 + 196 * tmp4 + 2744 * x3),
xmask, eviction_policy='evict_last')
tmp38 = tmp37 - tmp14
tmp39 = tmp38 * tmp16
tmp40 = tmp39 > tmp18
tmp41 = tmp39 * tmp20
tmp42 = tl.where(tmp40, tmp39, tmp41)
tmp43 = tl.load(in_ptr3 + (tmp12 + 14 * tmp36 + 196 * tmp26 + 2744 * x3
), xmask, eviction_policy='evict_last')
tmp44 = tmp43 - tmp14
tmp45 = tmp44 * tmp16
tmp46 = tmp45 > tmp18
tmp47 = tmp45 * tmp20
tmp48 = tl.where(tmp46, tmp45, tmp47)
tmp50 = tmp49 + tmp1
tmp51 = tmp49 < 0
tmp52 = tl.where(tmp51, tmp50, tmp49)
tmp53 = tl.load(in_ptr3 + (tmp52 + 14 * tmp36 + 196 * tmp26 + 2744 * x3
), xmask, eviction_policy='evict_last')
tmp54 = tmp53 - tmp14
tmp55 = tmp54 * tmp16
tmp56 = tmp55 > tmp18
tmp57 = tmp55 * tmp20
tmp58 = tl.where(tmp56, tmp55, tmp57)
tmp59 = tmp58 - tmp48
tmp61 = tmp59 * tmp60
tmp62 = tmp48 + tmp61
tmp63 = tl.load(in_ptr3 + (tmp52 + 14 * tmp36 + 196 * tmp4 + 2744 * x3),
xmask, eviction_policy='evict_last')
tmp64 = tmp63 - tmp14
tmp65 = tmp64 * tmp16
tmp66 = tmp65 > tmp18
tmp67 = tmp65 * tmp20
tmp68 = tl.where(tmp66, tmp65, tmp67)
tmp69 = tmp68 - tmp42
tmp70 = tmp69 * tmp60
tmp71 = tmp42 + tmp70
tmp72 = tl.load(in_ptr3 + (tmp52 + 14 * tmp8 + 196 * tmp4 + 2744 * x3),
xmask, eviction_policy='evict_last')
tmp73 = tmp72 - tmp14
tmp74 = tmp73 * tmp16
tmp75 = tmp74 > tmp18
tmp76 = tmp74 * tmp20
tmp77 = tl.where(tmp75, tmp74, tmp76)
tmp78 = tmp77 - tmp22
tmp79 = tmp78 * tmp60
tmp80 = tmp22 + tmp79
tmp81 = tmp80 - tmp71
tmp82 = tl.load(in_ptr3 + (tmp52 + 14 * tmp8 + 196 * tmp26 + 2744 * x3),
xmask, eviction_policy='evict_last')
tmp83 = tmp82 - tmp14
tmp84 = tmp83 * tmp16
tmp85 = tmp84 > tmp18
tmp86 = tmp84 * tmp20
tmp87 = tl.where(tmp85, tmp84, tmp86)
tmp88 = tmp87 - tmp32
tmp89 = tmp88 * tmp60
tmp90 = tmp32 + tmp89
tmp91 = tmp90 - tmp62
tmp93 = tmp81 * tmp92
tmp94 = tmp71 + tmp93
tmp95 = tmp91 * tmp92
tmp96 = tmp62 + tmp95
tmp97 = tmp96 - tmp94
tmp99 = tmp97 * tmp98
tmp100 = tmp94 + tmp99
tl.store(in_out_ptr2 + x5, tmp100, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1, 1), padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9, 9), (2916, 729, 81, 9, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16),
torch.float32)
buf6 = empty_strided_cuda((4, 4, 9, 9, 9), (2916, 729, 81, 9, 1),
torch.float32)
buf5 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16),
torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_0[grid
(16)](buf1, primals_2, buf2, buf6, buf5, 16, 729, num_warps=8,
num_stages=1)
del primals_2
buf7 = extern_kernels.convolution(buf6, primals_4, stride=(1, 1, 1),
padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 14, 14, 14), (10976, 2744, 196, 14, 1))
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 1, 1, 1), torch
.float32)
buf10 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16),
torch.float32)
buf12 = reinterpret_tensor(buf10, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1), 0
)
del buf10
triton_red_fused__native_batch_norm_legit_convolution_1[grid(16)](buf8,
buf12, primals_5, buf9, 16, 2744, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
del primals_5
buf13 = empty_strided_cuda((28, 1, 1), (1, 1, 1), torch.int64)
triton_poi_fused__to_copy_2[grid(28)](buf13, 28, XBLOCK=32,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((28, 1, 1), (1, 1, 1), torch.int64)
triton_poi_fused_add_clamp_3[grid(28)](buf14, 28, XBLOCK=32,
num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((28, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_2[grid(28)](buf15, 28, XBLOCK=32,
num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((28, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_3[grid(28)](buf16, 28, XBLOCK=32,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((28,), (1,), torch.int64)
triton_poi_fused__to_copy_2[grid(28)](buf17, 28, XBLOCK=32,
num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((28,), (1,), torch.int64)
triton_poi_fused_add_clamp_3[grid(28)](buf18, 28, XBLOCK=32,
num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((28,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4[grid(28)](buf23,
28, XBLOCK=32, num_warps=1, num_stages=1)
buf26 = empty_strided_cuda((28, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4[grid(28)](buf26,
28, XBLOCK=32, num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((28, 1, 1), (1, 1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4[grid(28)](buf29,
28, XBLOCK=32, num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((4, 4, 28, 28, 28), (87808, 21952, 784,
28, 1), torch.float32)
buf27 = buf20
del buf20
buf30 = buf27
del buf27
triton_poi_fused__unsafe_index_add_mul_sub_5[grid(351232)](buf30,
buf13, buf16, buf17, buf8, buf9, buf12, buf14, buf15, buf18,
buf23, buf26, buf29, 351232, XBLOCK=512, num_warps=8, num_stages=1)
return (buf30, primals_1, primals_3, primals_4, buf1,
reinterpret_tensor(buf5, (16,), (1,), 0), buf6, buf8, buf9, buf12,
buf13, buf14, buf15, buf16, buf17, buf18, buf23, buf26, buf29,
reinterpret_tensor(buf2, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1), 0))
class DecoderBlockNew(nn.Module):
"""
Decoder block class
"""
def __init__(self, in_channels, middle_channels, out_channels, k_size,
pad_size):
super(DecoderBlockNew, self).__init__()
self.conv1 = nn.Conv3d(in_channels, middle_channels, kernel_size=
k_size, padding=pad_size)
self.conv2 = nn.Conv3d(middle_channels, out_channels, kernel_size=
k_size, padding=pad_size)
self.IN1 = nn.InstanceNorm3d(out_channels)
self.IN2 = nn.InstanceNorm3d(out_channels)
self.upsample = partial(F.interpolate, scale_factor=2, mode=
'trilinear', align_corners=False)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = self.conv2.weight
primals_5 = self.conv2.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
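# Hedged smoke test (added for illustration; needs a CUDA device because the
# compiled wrapper is CUDA-specific): run the Triton path once and check the
# upsampled output shape.
if __name__ == "__main__" and torch.cuda.is_available():
    _blk = DecoderBlockNew(4, 4, 4, k_size=4, pad_size=4).cuda()
    _y = _blk(torch.rand(4, 4, 4, 4, 4, device='cuda'))
    assert _y.shape == (4, 4, 28, 28, 28)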
| SVRTK/Segmentation_FetalMRI | DecoderBlock | false | 17,894 | ["Apache-2.0"] | 6 | 9344a2248cbe8e4cccbe05ca98214626dcf62805 | https://github.com/SVRTK/Segmentation_FetalMRI/tree/9344a2248cbe8e4cccbe05ca98214626dcf62805 |
SoftMaxWeightedSum | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/oo/coosevthtcbe2gvtu33ztt7i5dcgwk4j77vkan2jnkns2s53a45i.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%primals_1, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 2],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 2
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None)
tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yj/cyjphefbw4cjwexdsarqod22xsiv6523pp44sp4ujgi5xb4hw3ft.py
# Topologically Sorted Source Nodes: [softmax, mul, sum_1], Original ATen: [aten._softmax, aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# softmax => div, exp, sub
# sum_1 => sum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [0]), kwargs = {})
triton_poi_fused__softmax_mul_sum_1 = async_compile.triton('triton_poi_fused__softmax_mul_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr2 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + (x0), xmask)
tmp11 = tl.load(in_ptr0 + (1))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp4 = tmp1 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp8 = tmp5 / tmp7
tmp10 = tmp8 * tmp9
tmp13 = tmp12 - tmp3
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp7
tmp16 = tmp15 * tmp9
tmp17 = tmp10 + tmp16
tl.store(out_ptr0 + (x0), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (2, 1, 1, 1, 1), (1, 1, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_per_fused__softmax_0.run(primals_1, buf0, buf1, 1, 2, grid=grid(1), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax, mul, sum_1], Original ATen: [aten._softmax, aten.mul, aten.sum]
triton_poi_fused__softmax_mul_sum_1.run(primals_1, buf0, buf1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
return (buf2, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((2, 1, 1, 1, 1), (1, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class SoftMaxWeightedSum(nn.Module):
def __init__(self, op_number=2):
super(SoftMaxWeightedSum, self).__init__()
shape = op_number, 1, 1, 1, 1
self.weights = nn.Parameter(torch.ones(shape), requires_grad=True)
def forward(self, x):
return torch.sum(torch.softmax(self.weights, dim=0) * x, dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
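# Hedged sanity check (added for illustration; not from the original repo):
# with the default all-ones weights, softmax over dim 0 assigns each of the
# two branches weight 0.5, so the sum over the broadcast input collapses
# back to x itself.
if __name__ == "__main__":
    _m = SoftMaxWeightedSum(op_number=2)
    _x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(_m(_x), _x)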
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
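# Note (descriptive comment added for readability): kernel 0 reduces the
# two scalar weights in one block, storing max(w) and sum(exp(w - max(w))),
# the two ingredients of a numerically stable softmax.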
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None)
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)
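# Note: kernel 1 finishes the softmax from the stored max/sum and unrolls
# the op_number=2 weighted sum elementwise; both branches read the same x
# because the input broadcasts over dim 0.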
@triton.jit
def triton_poi_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + x0, xmask)
tmp11 = tl.load(in_ptr0 + 1)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp4 = tmp1 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp8 = tmp5 / tmp7
tmp10 = tmp8 * tmp9
tmp13 = tmp12 - tmp3
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp7
tmp16 = tmp15 * tmp9
tmp17 = tmp10 + tmp16
tl.store(out_ptr0 + x0, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (2, 1, 1, 1, 1), (1, 1, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch.
float32)
buf1 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch.
float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(1)](primals_1, buf0, buf1, 1, 2,
XBLOCK=1, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_mul_sum_1[grid(256)](primals_1, buf0,
buf1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf1
return buf2, primals_1, primals_2
class SoftMaxWeightedSumNew(nn.Module):
def __init__(self, op_number=2):
super(SoftMaxWeightedSumNew, self).__init__()
shape = op_number, 1, 1, 1, 1
self.weights = nn.Parameter(torch.ones(shape), requires_grad=True)
def forward(self, input_0):
primals_1 = self.weights
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
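# Hedged smoke test (added for illustration; needs CUDA): with the default
# all-ones weights the two softmax coefficients are 0.5 each, so the
# compiled weighted sum should reproduce the input.
if __name__ == "__main__" and torch.cuda.is_available():
    _m = SoftMaxWeightedSumNew().cuda()
    _x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(_m(_x), _x)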
| Senyaaa/detection-experiments | SoftMaxWeightedSum | false | 17,895 | ["Apache-2.0"] | 5 | 5e80dd458e886ca27db5420d25ade8f9d74ae5a8 | https://github.com/Senyaaa/detection-experiments/tree/5e80dd458e886ca27db5420d25ade8f9d74ae5a8 |
Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/46/c46lzwejkrqhbpjcsjd2upeycrhiq77jsnd4hwzfjxcncclvh3zv.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/7o/c7ofyv54vqgy4vn637mw3dfkal2vp3ujoh4qbckoc7ki247ygkdx.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/jb/cjbtznlhohsqc4ehnuomoexjfwbzrldblektblk4mcfimtvweotj.py
# Topologically Sorted Source Nodes: [prob_1, max_1, prob_2], Original ATen: [aten.sum, aten.max, aten.sub, aten.isnan, aten.logical_and, aten.eq, aten.logical_or]
# Source node to ATen node mapping:
# max_1 => max_1
# prob_1 => sum_1
# prob_2 => sub
# Graph fragment:
# %sum_1 : [num_users=4] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_7, [0]), kwargs = {})
# %max_1 : [num_users=3] = call_function[target=torch.ops.aten.max.default](args = (%sum_1,), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %max_1), kwargs = {})
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%sum_1,), kwargs = {})
# %isnan_1 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%max_1,), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%isnan, %isnan_1), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%sum_1, %max_1), kwargs = {})
# %logical_or : [num_users=1] = call_function[target=torch.ops.aten.logical_or.default](args = (%eq, %logical_and), kwargs = {})
triton_per_fused_eq_isnan_logical_and_logical_or_max_sub_sum_2 = async_compile.triton('triton_per_fused_eq_isnan_logical_and_logical_or_max_sub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_eq_isnan_logical_and_logical_or_max_sub_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_eq_isnan_logical_and_logical_or_max_sub_sum_2(in_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = triton_helpers.max2(tmp7, 1)[:, None]
tmp10 = tmp6 - tmp9
tmp11 = tmp6 == tmp9
tmp12 = libdevice.isnan(tmp6).to(tl.int1)
tmp13 = libdevice.isnan(tmp9).to(tl.int1)
tmp14 = tmp12 & tmp13
tmp15 = tmp11 | tmp14
tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp10, None)
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/oq/coqn4sg5thbfh452xaqoeyjigp4gtslxbe24asdj7t35bnjl2ohn.py
# Topologically Sorted Source Nodes: [prob_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# prob_3 => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [0], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/br/cbrb4bh2saptmbb6a3nh7gex7mxznc3rrpcdwy2hmem3r4dncl3i.py
# Topologically Sorted Source Nodes: [prob_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# prob_3 => div, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4096, 4), (4, 1))
assert_size_stride(primals_2, (4096, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (512, 4096), (4096, 1))
assert_size_stride(primals_5, (512, ), (1, ))
assert_size_stride(primals_6, (32, 512), (512, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (4, 512), (512, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4096), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4096), (65536, 16384, 4096, 1), 0); del buf0 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 4096), (65536, 16384, 4096, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf12, 262144, grid=grid(262144), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4096), (4096, 1), 0), reinterpret_tensor(primals_4, (4096, 512), (1, 4096), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0); del buf2 # reuse
buf11 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf11, 32768, grid=grid(32768), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [pos], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 32), (1, 512), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [prob], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (64, 512), (512, 1), 0), reinterpret_tensor(primals_8, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf5)
del primals_9
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [prob_1, max_1, prob_2], Original ATen: [aten.sum, aten.max, aten.sub, aten.isnan, aten.logical_and, aten.eq, aten.logical_or]
triton_per_fused_eq_isnan_logical_and_logical_or_max_sub_sum_2.run(buf5, buf7, buf10, 1, 64, grid=grid(1), stream=stream0)
del buf5
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [prob_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [prob_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf8, buf9, 64, grid=grid(64), stream=stream0)
del buf8
return (reinterpret_tensor(buf4, (64, 4, 4, 2), (32, 8, 2, 1), 0), buf9, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4096), (4096, 1), 0), reinterpret_tensor(buf3, (64, 512), (512, 1), 0), buf9, buf10, primals_8, primals_6, buf11, primals_4, buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4096, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((512, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self, n_features, n_modes, T):
super(Decoder, self).__init__()
self.n_modes = n_modes
self.T = T
self.linear1 = nn.Linear(n_features, 4096)
self.linear2 = nn.Linear(512, n_modes * T * 2)
self.linear3 = nn.Linear(512, n_modes)
self.linear4 = nn.Linear(4096, 512)
self.softmax = nn.Softmax(dim=0)
self.relu = nn.ReLU()
def forward(self, x):
x = self.linear1(x)
x = self.relu(x)
x = self.linear4(x)
x = self.relu(x)
pos = self.linear2(x)
prob = self.linear3(x)
prob = torch.sum(prob, axis=0)
prob = prob - torch.max(prob)
prob = self.softmax(prob)
return pos.view((-1, self.n_modes, self.T, 2)), prob
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4, 'n_modes': 4, 'T': 4}]
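# Hedged shape check (added for illustration; not from the original repo):
# linear2 maps 512 -> n_modes*T*2 = 32, so pos.view(-1, 4, 4, 2) flattens
# the (4, 4, 4, 32) output to (64, 4, 4, 2); prob is reduced over dim 0
# before the dim-0 softmax.
if __name__ == "__main__":
    _dec = Decoder(n_features=4, n_modes=4, T=4)
    _pos, _prob = _dec(torch.rand(4, 4, 4, 4))
    assert _pos.shape == (64, 4, 4, 2) and _prob.shape == (4, 4, 4)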
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
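# Note (descriptive comment added for readability): the two kernels below
# fuse the linear bias-add with ReLU and also emit the (out <= 0) boolean
# mask that the backward pass reuses for threshold_backward.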
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
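# Note: this reduction sums the linear3 logits over dim 0 (four 64-element
# slices), subtracts the global max for numerical stability, and records an
# equality/NaN mask used by max's backward.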
@triton.jit
def triton_per_fused_eq_isnan_logical_and_logical_or_max_sub_sum_2(in_ptr0,
out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = triton_helpers.max2(tmp7, 1)[:, None]
tmp10 = tmp6 - tmp9
tmp11 = tmp6 == tmp9
tmp12 = libdevice.isnan(tmp6).to(tl.int1)
tmp13 = libdevice.isnan(tmp9).to(tl.int1)
tmp14 = tmp12 & tmp13
tmp15 = tmp11 | tmp14
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp10, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp15, None)
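# Note: kernels 3 and 4 form the standard two-pass stable softmax over
# dim 0 (stride-16 slices): exp(x - max) first, then division by the
# per-position sum.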
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4096, 4), (4, 1))
assert_size_stride(primals_2, (4096,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (512, 4096), (4096, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (32, 512), (512, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (4, 512), (512, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4096), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4096), (65536, 16384,
4096, 1), 0)
del buf0
buf12 = empty_strided_cuda((4, 4, 4, 4096), (65536, 16384, 4096, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(262144)](buf1,
primals_2, buf12, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4096), (4096, 1), 0
), reinterpret_tensor(primals_4, (4096, 512), (1, 4096), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0
)
del buf2
buf11 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(32768)](buf3,
primals_5, buf11, 32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 512),
(512, 1), 0), reinterpret_tensor(primals_6, (512, 32), (1, 512),
0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (64, 512),
(512, 1), 0), reinterpret_tensor(primals_8, (512, 4), (1, 512),
0), alpha=1, beta=1, out=buf5)
del primals_9
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_per_fused_eq_isnan_logical_and_logical_or_max_sub_sum_2[grid(1)
](buf5, buf7, buf10, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf5
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf8
return reinterpret_tensor(buf4, (64, 4, 4, 2), (32, 8, 2, 1), 0
), buf9, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4096), (4096, 1), 0
), reinterpret_tensor(buf3, (64, 512), (512, 1), 0
), buf9, buf10, primals_8, primals_6, buf11, primals_4, buf12
class DecoderNew(nn.Module):
def __init__(self, n_features, n_modes, T):
super(DecoderNew, self).__init__()
self.n_modes = n_modes
self.T = T
self.linear1 = nn.Linear(n_features, 4096)
self.linear2 = nn.Linear(512, n_modes * T * 2)
self.linear3 = nn.Linear(512, n_modes)
self.linear4 = nn.Linear(4096, 512)
self.softmax = nn.Softmax(dim=0)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_6 = self.linear2.weight
primals_7 = self.linear2.bias
primals_8 = self.linear3.weight
primals_9 = self.linear3.bias
primals_4 = self.linear4.weight
primals_5 = self.linear4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
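# Usage sketch (hypothetical; shapes follow the asserts in call() above,
# i.e. n_features=4, n_modes=4, T=4):
#   model = DecoderNew(n_features=4, n_modes=4, T=4).cuda()
#   trajs, probs = model(torch.rand(4, 4, 4, 4, device='cuda'))
#   # trajs: (64, 4, 4, 2) per-mode (x, y) outputs; probs: (4, 4, 4) softmax weights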
| SambaranRepo/VectorNet_Waymo | Decoder | false | 17,896 | [
"MIT"
] | 4 | 454016a5020444e78943786c14e4e12a75ce052e | https://github.com/SambaranRepo/VectorNet_Waymo/tree/454016a5020444e78943786c14e4e12a75ce052e |
resBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/tt/ctthexrcgmoykvsyasq7xirwxi6m3yxgjocmuvarikaawgqvdiws.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# pad => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/hc/chcdx6ttmlttffwusj5w6vjo2khj6mg5rgpxcpvcjyf4xor54xwb.py
# Topologically Sorted Source Nodes: [conv2d, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# conv2d => convolution
# instance_norm => add, rsqrt, var_mean
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp23, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/rz/crzysa6qsoccbj2w7dc5chq6wvn3wfygzbotyhv7cwnbfqmjwdeg.py
# Topologically Sorted Source Nodes: [x, pad_1], Original ATen: [aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# pad_1 => _unsafe_index_2, _unsafe_index_3
# x => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_relu_2 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/2j/c2j2qub5gawp7winqcf3uwlyxdssxmpph6mjsyx7cse7pql4o2ip.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1, x_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.add]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => add_1, rsqrt_1, var_mean_1
# x_2 => add_2
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_convolution_3 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (r2 + (16*x3)), xmask, other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 16.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp27 = tmp25 + tmp26
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.store(out_ptr2 + (r2 + (16*x3)), tmp27, xmask)
tl.store(out_ptr3 + (x3), tmp24, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf6 = reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_1.run(buf2, buf6, primals_3, buf3, 16, 16, grid=grid(16), stream=stream0)
del primals_3
buf7 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, pad_1], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_2.run(buf2, buf3, buf6, buf7, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8; del buf8 # reuse
buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_1, x_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.add]
triton_per_fused__native_batch_norm_legit_add_convolution_3.run(buf9, primals_5, primals_1, buf10, buf14, buf13, 16, 16, grid=grid(16), stream=stream0)
del primals_1
del primals_5
return (buf14, primals_2, primals_4, buf0, buf2, buf3, buf6, buf7, buf9, reinterpret_tensor(buf13, (16, ), (1, ), 0), reinterpret_tensor(buf10, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class resBlock(nn.Module):
def __init__(self, channelDepth, windowSize=3):
super(resBlock, self).__init__()
self.pad = nn.ReflectionPad2d(1)
self.IN_conv1 = nn.InstanceNorm2d(channelDepth)
self.conv1 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1, 0)
self.conv2 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1, 0)
def forward(self, x):
res = x
x = F.relu(self.IN_conv1(self.conv1(self.pad(x))))
x = self.IN_conv1(self.conv2(self.pad(x)))
x = x + res
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channelDepth': 4}]
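# Usage sketch: the block is shape-preserving, since ReflectionPad2d(1)
# offsets the two padding-free 3x3 convolutions.
#   block = resBlock(channelDepth=4)
#   y = block(torch.rand(4, 4, 4, 4))  # y.shape == (4, 4, 4, 4)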
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_3(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 16.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp27 = tmp25 + tmp26
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.store(out_ptr2 + (r2 + 16 * x3), tmp27, xmask)
tl.store(out_ptr3 + x3, tmp24, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf6 = reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf4
triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2,
buf6, primals_3, buf3, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_3
buf7 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_reflection_pad2d_relu_2[grid(576)](buf2, buf3,
buf6, buf7, 576, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_add_convolution_3[grid(16)](
buf9, primals_5, primals_1, buf10, buf14, buf13, 16, 16, XBLOCK
=1, num_warps=2, num_stages=1)
del primals_1
del primals_5
return (buf14, primals_2, primals_4, buf0, buf2, buf3, buf6, buf7, buf9,
reinterpret_tensor(buf13, (16,), (1,), 0), reinterpret_tensor(buf10,
(1, 16, 1, 1), (16, 1, 1, 1), 0))
class resBlockNew(nn.Module):
def __init__(self, channelDepth, windowSize=3):
super(resBlockNew, self).__init__()
self.pad = nn.ReflectionPad2d(1)
self.IN_conv1 = nn.InstanceNorm2d(channelDepth)
self.conv1 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1, 0)
self.conv2 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1, 0)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
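# call() returns the residual output first; the remaining tensors are
# intermediates presumably saved for the backward pass, so only output[0]
# is exposed here.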
| SeokjaeLIM/DSSN_release-Pytorch | resBlock | false | 17,897 | [
"Apache-2.0"
] | 7 | fef1dac120d7b83367b4c69f239b089ab5f004d7 | https://github.com/SeokjaeLIM/DSSN_release-Pytorch/tree/fef1dac120d7b83367b4c69f239b089ab5f004d7 |
WeightedFeatureFusion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/nz/cnzuftnmmebl6ne2xmorh3ifvszanwobhjq52c7hosfisapewi6i.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add
# x_1 => add_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %select), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %select_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (256 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 + tmp1
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (5, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((5, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torchvision.models.resnet import *
import torch.utils.data
class WeightedFeatureFusion(nn.Module):
def __init__(self, layers, weight=False):
super(WeightedFeatureFusion, self).__init__()
self.layers = layers
self.weight = weight
self.n = len(layers) + 1
if weight:
self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True)
def forward(self, x, outputs):
if self.weight:
w = torch.sigmoid(self.w) * (2 / self.n)
x = x * w[0]
nx = x.shape[1]
for i in range(self.n - 1):
a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[
self.layers[i]]
na = a.shape[1]
if nx == na:
x = x + a
elif nx > na:
x[:, :na] = x[:, :na] + a
else:
x = x + a[:, :nx]
return x
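# Illustrative channel handling (comment added for clarity): with nx=4 and
# na=6 only a[:, :4] is added, while with nx=6 and na=4 only x[:, :4]
# receives the sum. The compiled Triton kernel in this record specializes
# to the traced nx == na case.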
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([5, 4, 4, 4])]
def get_init_inputs():
return [[], {'layers': [4, 4]}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torchvision.models.resnet import *
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + (256 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 + tmp1
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (5, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class WeightedFeatureFusionNew(nn.Module):
def __init__(self, layers, weight=False):
super(WeightedFeatureFusionNew, self).__init__()
self.layers = layers
self.weight = weight
self.n = len(layers) + 1
if weight:
self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| PanJason/ML_Proj | WeightedFeatureFusion | false | 17,898 | [
"MIT"
] | 4 | 663be12e8eb6e30e3c902a4984ac0db33bfce605 | https://github.com/PanJason/ML_Proj/tree/663be12e8eb6e30e3c902a4984ac0db33bfce605 |
ConformerFeedForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/s5/cs5cii52tc3hbclol2cj5szknqlzoteg2pw4i5j57dyaz6f54ybl.py
# Topologically Sorted Source Nodes: [sigmoid, x_1], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# x_1 => mul
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, x_1], Original ATen: [aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
import torch.optim
class Swish(nn.Module):
"""
Swish activation function, introduced in 'https://arxiv.org/abs/1710.05941'.
"""
def forward(self, x):
return x * torch.sigmoid(x)
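# Equivalently swish(x) = x * sigmoid(x) = x / (1 + exp(-x)); smooth and
# non-monotonic, unlike ReLU.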
class ConformerFeedForward(nn.Module):
"""
Feed-forward module of the Conformer model.
"""
def __init__(self, d_model, d_ff, dropout, activation=Swish()):
super(ConformerFeedForward, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.activation = activation
self.dropout = nn.Dropout(p=dropout)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.linear1(x)
x = self.activation(x)
x = self.dropout(x)
x = self.linear2(x)
return x
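# Usage sketch (hypothetical sizes; the Conformer paper typically sets
# d_ff = 4 * d_model):
#   ff = ConformerFeedForward(d_model=256, d_ff=1024, dropout=0.1)
#   y = ff(torch.rand(8, 100, 256))  # (batch, time, d_model) is preserved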
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
class Swish(nn.Module):
"""
Swish activation function, introduced in 'https://arxiv.org/abs/1710.05941'.
"""
def forward(self, x):
return x * torch.sigmoid(x)
class ConformerFeedForwardNew(nn.Module):
"""
Feed-forward module of the Conformer model.
"""
def __init__(self, d_model, d_ff, dropout, activation=Swish()):
super(ConformerFeedForwardNew, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.activation = activation
self.dropout = nn.Dropout(p=dropout)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ShantanuNair/NeMo | ConformerFeedForward | false | 17,899 | [
"Apache-2.0"
] | 10 | d01b7bbc3fdb1bbf14789f71b8f368cf0aa8f86b | https://github.com/ShantanuNair/NeMo/tree/d01b7bbc3fdb1bbf14789f71b8f368cf0aa8f86b |
FusionAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/vg/cvgn47a3suf3abdi3yxlfefcismdx34uxftikwkcmdazos4igr4d.py
# Topologically Sorted Source Nodes: [query_project_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# query_project_1 => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/so/csof3x4y7lrn7on2ocdmxal3ggsfnqhrpinejjswa64wfco7qk6s.py
# Topologically Sorted Source Nodes: [attention_weight], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weight => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
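# The max-subtraction makes the softmax numerically stable:
# softmax(x) = exp(x - max(x)) / sum(exp(x - max(x))), split across the two kernels below.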
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/n2/cn2d7m5uq6db3dfhz3ze6y5xji4hq7paw66kfee3iwqh6vc2tgis.py
# Topologically Sorted Source Nodes: [attention_weight], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weight => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/5t/c5tbb6lfln6dfrdwc65qi6ycvqtlhp7vdukr5nqihqc4jplvfo62.py
# Topologically Sorted Source Nodes: [attention_vec, attention_vec_1], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# attention_vec => mul_1
# attention_vec_1 => sum_2
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x1 = (xindex // 4) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask)
tmp1 = tl.load(in_ptr1 + (x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')
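# Note (added): eager-mode sketch of the mul+sum fusion above (shapes as in
# this file; illustrative only):
#   attention_vec = (inputs * attention_weight).sum(dim=1)
# The (4, 4, 4, 4) inputs are weighted by the broadcast (4, 4, 4, 1) softmax
# weights and reduced over dim 1, producing the (4, 4, 4) output.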
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [query_project_1], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [project_value], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attention_weight], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attention_weight], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [attention_vec, attention_vec_1], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(primals_3, buf6, buf7, 64, grid=grid(64), stream=stream0)
return (buf7, buf6, primals_3, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class FusionAttention(nn.Module):
def __init__(self, dim):
super(FusionAttention, self).__init__()
self.attention_matrix = nn.Linear(dim, dim)
self.project_weight = nn.Linear(dim, 1)
def forward(self, inputs):
query_project = self.attention_matrix(inputs)
query_project = F.leaky_relu(query_project)
project_value = self.project_weight(query_project)
attention_weight = torch.softmax(project_value, dim=1)
attention_vec = inputs * attention_weight
attention_vec = torch.sum(attention_vec, dim=1)
return attention_vec, attention_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
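# Hedged usage sketch (uses only names defined above; shapes come from
# get_inputs/get_init_inputs):
#   model = FusionAttention(dim=4)
#   vec, weight = model(torch.rand(4, 4, 4, 4))
#   # vec: (4, 4, 4) pooled features; weight: (4, 4, 4, 1) softmax over dim 1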
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0)
del buf5
triton_poi_fused_mul_sum_3[grid(64)](primals_3, buf6, buf7, 64,
XBLOCK=64, num_warps=1, num_stages=1)
    return (buf7, buf6, primals_3, buf1,
        reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf6, primals_4)
class FusionAttentionNew(nn.Module):
def __init__(self, dim):
super(FusionAttentionNew, self).__init__()
self.attention_matrix = nn.Linear(dim, dim)
self.project_weight = nn.Linear(dim, 1)
def forward(self, input_0):
primals_1 = self.attention_matrix.weight
primals_2 = self.attention_matrix.bias
primals_4 = self.project_weight.weight
primals_5 = self.project_weight.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| Seondong/Customs-Fraud-Detection | FusionAttention | false | 17,900 | ["MIT"] | 7 | eb9e4641a78cb32d73787de86dd72ebb09df1452 | https://github.com/Seondong/Customs-Fraud-Detection/tree/eb9e4641a78cb32d73787de86dd72ebb09df1452 |
MultiLayerPerceptron | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/4g/c4guhk7x6skkidedvs2gxz2kcu6gb76l3ig5crjjvjtzvnjlhlte.py
# Topologically Sorted Source Nodes: [output_states_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# output_states_2 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
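# Note (added): this kernel fuses the linear layer's bias add with ReLU and,
# in the same pass, records the mask that ReLU's backward needs. Eager-mode
# sketch (illustrative only):
#   out = torch.relu(x + bias)   # written back in place via in_out_ptr0
#   mask = out <= 0              # stored to out_ptr0 for threshold_backward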
# kernel path: runs/run_shard_2/inductor_cache/27/c27nz7qakwg4mesqev3mbp5r3cm7x6cfgpenxv4qqlpbpvqn6dpc.py
# Topologically Sorted Source Nodes: [output_states_4], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output_states_4 => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
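# Note (added): first of two log_softmax passes; subtracting the row max
# before exponentiation keeps the subsequent exp/log numerically stable:
#   shifted = x - x.max(dim=-1, keepdim=True).values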
# kernel path: runs/run_shard_2/inductor_cache/ag/cagdooqnmqsu3yijgx2xhwfomhimfdtgaadz5y4zn3ej2iv7onro.py
# Topologically Sorted Source Nodes: [output_states_4], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output_states_4 => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
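# Note (added): second log_softmax pass, completing the standard identity
#   log_softmax(x) = (x - max) - log(sum(exp(x - max)))
# where `x - max` is the `shifted` tensor produced by the kernel above.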
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [output_states_2], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf5, 256, grid=grid(256), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_states_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_states_4], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output_states_4], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
return (buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.optim
class MultiLayerPerceptron(torch.nn.Module):
"""
A simple MLP that can either be used independently or put on top
of pretrained models (such as BERT) and act as a classifier.
Args:
hidden_size (int): the size of each layer
num_classes (int): number of output classes
num_layers (int): number of layers
activation (str): type of activations for layers in between
log_softmax (bool): whether to add a log_softmax layer before output
"""
def __init__(self, hidden_size: 'int', num_classes: 'int', num_layers:
'int'=2, activation: 'str'='relu', log_softmax: 'bool'=True):
super().__init__()
self.layers = 0
for _ in range(num_layers - 1):
layer = torch.nn.Linear(hidden_size, hidden_size)
setattr(self, f'layer{self.layers}', layer)
setattr(self, f'layer{self.layers + 1}', getattr(torch, activation)
)
self.layers += 2
layer = torch.nn.Linear(hidden_size, num_classes)
setattr(self, f'layer{self.layers}', layer)
self.layers += 1
self.log_softmax = log_softmax
@property
def last_linear_layer(self):
return getattr(self, f'layer{self.layers - 1}')
def forward(self, hidden_states):
output_states = hidden_states[:]
for i in range(self.layers):
output_states = getattr(self, f'layer{i}')(output_states)
if self.log_softmax:
output_states = torch.log_softmax(output_states, dim=-1)
return output_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'num_classes': 4}]
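# Hedged usage sketch (uses only names defined above):
#   mlp = MultiLayerPerceptron(hidden_size=4, num_classes=4)
#   log_probs = mlp(torch.rand(4, 4, 4, 4))  # log-probabilities over the last dim
#   # log_probs.exp().sum(-1) is ~1 everywhere because log_softmax is applied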
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__log_softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf3
    return (buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5)
class MultiLayerPerceptronNew(torch.nn.Module):
"""
A simple MLP that can either be used independently or put on top
of pretrained models (such as BERT) and act as a classifier.
Args:
hidden_size (int): the size of each layer
num_classes (int): number of output classes
num_layers (int): number of layers
activation (str): type of activations for layers in between
log_softmax (bool): whether to add a log_softmax layer before output
"""
def __init__(self, hidden_size: 'int', num_classes: 'int', num_layers:
'int'=2, activation: 'str'='relu', log_softmax: 'bool'=True):
super().__init__()
self.layers = 0
for _ in range(num_layers - 1):
layer = torch.nn.Linear(hidden_size, hidden_size)
setattr(self, f'layer{self.layers}', layer)
setattr(self, f'layer{self.layers + 1}', getattr(torch, activation)
)
self.layers += 2
layer = torch.nn.Linear(hidden_size, num_classes)
setattr(self, f'layer{self.layers}', layer)
self.layers += 1
self.log_softmax = log_softmax
@property
def last_linear_layer(self):
return getattr(self, f'layer{self.layers - 1}')
def forward(self, input_0):
primals_2 = self.layer0.weight
primals_3 = self.layer0.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ShantanuNair/NeMo | MultiLayerPerceptron | false | 17,901 | ["Apache-2.0"] | 10 | d01b7bbc3fdb1bbf14789f71b8f368cf0aa8f86b | https://github.com/ShantanuNair/NeMo/tree/d01b7bbc3fdb1bbf14789f71b8f368cf0aa8f86b |
_Residual_Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/kn/cknsk65zb3vr654gzimo2avm4pkfs2utom2x2kles4puveax6xpk.py
# Topologically Sorted Source Nodes: [instance_norm, output], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
# Source node to ATen node mapping:
# instance_norm => add, add_1, mul, mul_1, repeat, rsqrt, sub, var_mean
# output => gt, mul_2, where
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_3, [4]), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %unsqueeze_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_2), kwargs = {})
triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0 = async_compile.triton('triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[256, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 256
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 64), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp1 = tl.load(in_ptr1 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = triton_helpers.welford_reduce(
tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0
)
tmp3_mean = tl.where(rmask & xmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask & xmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask & xmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(
tmp3_mean, tmp3_m2, tmp3_weight, 1
)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tl.store(out_ptr1 + (x0), tmp3, xmask)
tmp15 = tl.load(in_ptr2 + (x0 % 64), xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp6 = tl.load(in_ptr1 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp7 = tmp6 - tmp3
tmp8 = 4096.0
tmp9 = tmp4 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp7 * tmp12
tmp14 = tmp13 * tmp0
tmp16 = tmp14 + tmp15
tmp17 = 0.0
tmp18 = tmp16 > tmp17
tmp19 = 0.2
tmp20 = tmp16 * tmp19
tmp21 = tl.where(tmp18, tmp16, tmp20)
tl.store(in_out_ptr0 + (r1 + (4096*x0)), tmp21, rmask & xmask)
tmp22 = 4096.0
tmp23 = tmp4 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tl.store(out_ptr3 + (x0), tmp26, xmask)
''', device_str='cuda')
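# Note (added): this reduction kernel fuses InstanceNorm2d(affine=True) with
# LeakyReLU(0.2). Each program handles one (sample, channel) pair: a Welford
# loop over the 4096 spatial positions produces mean/variance, then a second
# loop normalizes and applies the activation. Eager-mode sketch:
#   out = torch.nn.functional.leaky_relu(
#       (x - mean) / torch.sqrt(var + 1e-05) * weight + bias, 0.2)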
# kernel path: runs/run_shard_2/inductor_cache/nq/cnqxwi6vwq4hgipp4p6lhupoft2wcabhzlw7z3yr6efrfzmfroxh.py
# Topologically Sorted Source Nodes: [output_1, output_2], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.add]
# Source node to ATen node mapping:
# output_1 => add_2, repeat_2, rsqrt_1, var_mean_1
# output_2 => add_4
# Graph fragment:
# %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_6, [4]), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, %primals_1), kwargs = {})
triton_red_fused__native_batch_norm_legit_add_repeat_1 = async_compile.triton('triton_red_fused__native_batch_norm_legit_add_repeat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[256, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_add_repeat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_add_repeat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 256
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 64), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp1 = tl.load(in_ptr1 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = triton_helpers.welford_reduce(
tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0
)
tmp3_mean = tl.where(rmask & xmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask & xmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask & xmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(
tmp3_mean, tmp3_m2, tmp3_weight, 1
)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tl.store(out_ptr1 + (x0), tmp3, xmask)
x2 = xindex % 64
tmp15 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp6 = tl.load(in_ptr1 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp17 = tl.load(in_ptr3 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp7 = tmp6 - tmp3
tmp8 = 4096.0
tmp9 = tmp4 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp7 * tmp12
tmp14 = tmp13 * tmp0
tmp16 = tmp14 + tmp15
tmp18 = tmp16 + tmp17
tl.store(out_ptr3 + (r1 + (4096*x0)), tmp18, rmask & xmask)
tmp19 = 4096.0
tmp20 = tmp4 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr4 + (x0), tmp23, xmask)
''', device_str='cuda')
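# Note (added): same instance-norm recipe as the kernel above, but the
# epilogue adds the residual identity (primals_1) instead of applying
# LeakyReLU, matching `torch.add(output, identity_data)` in the source
# module below:
#   out = instance_norm(conv2(h)) + x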
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, ), (1, ))
assert_size_stride(primals_5, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_6, (64, ), (1, ))
assert_size_stride(primals_7, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = empty_strided_cuda((256, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf6 = empty_strided_cuda((1, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1), 0); del buf6 # reuse
buf5 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm, output], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0.run(buf7, primals_3, buf0, primals_4, buf1, buf2, buf5, 256, 4096, grid=grid(256), stream=stream0)
del primals_3
del primals_4
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf9 = empty_strided_cuda((256, ), (1, ), torch.float32)
buf10 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf14 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
buf13 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [output_1, output_2], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.add]
triton_red_fused__native_batch_norm_legit_add_repeat_1.run(primals_6, buf8, primals_7, primals_1, buf9, buf10, buf14, buf13, 256, 4096, grid=grid(256), stream=stream0)
del primals_6
del primals_7
return (buf14, primals_1, primals_2, primals_5, buf0, buf1, reinterpret_tensor(buf5, (256, ), (1, ), 0), buf7, buf8, buf9, reinterpret_tensor(buf13, (256, ), (1, ), 0), reinterpret_tensor(buf10, (1, 256, 1, 1), (256, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 256, 1, 1), (256, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class _Residual_Block(nn.Module):
def __init__(self):
super(_Residual_Block, self).__init__()
self.conv1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in1 = nn.InstanceNorm2d(64, affine=True)
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in2 = nn.InstanceNorm2d(64, affine=True)
def forward(self, x):
identity_data = x
output = self.relu(self.in1(self.conv1(x)))
output = self.in2(self.conv2(output))
output = torch.add(output, identity_data)
return output
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
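# Hedged usage sketch (test shape from get_inputs):
#   block = _Residual_Block()
#   y = block(torch.rand(4, 64, 64, 64))  # residual output keeps the input shape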
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 256
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 64, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp1 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
        tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = triton_helpers.welford_reduce(
            tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0)
tmp3_mean = tl.where(rmask & xmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask & xmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask & xmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(tmp3_mean,
tmp3_m2, tmp3_weight, 1)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5_tmp[:, None]
tl.store(out_ptr1 + x0, tmp3, xmask)
tmp15 = tl.load(in_ptr2 + x0 % 64, xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp6 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp7 = tmp6 - tmp3
tmp8 = 4096.0
tmp9 = tmp4 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp7 * tmp12
tmp14 = tmp13 * tmp0
tmp16 = tmp14 + tmp15
tmp17 = 0.0
tmp18 = tmp16 > tmp17
tmp19 = 0.2
tmp20 = tmp16 * tmp19
tmp21 = tl.where(tmp18, tmp16, tmp20)
tl.store(in_out_ptr0 + (r1 + 4096 * x0), tmp21, rmask & xmask)
tmp22 = 4096.0
tmp23 = tmp4 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tl.store(out_ptr3 + x0, tmp26, xmask)
@triton.jit
def triton_red_fused__native_batch_norm_legit_add_repeat_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 256
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 64, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp1 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
        tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = triton_helpers.welford_reduce(
            tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0)
tmp3_mean = tl.where(rmask & xmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask & xmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask & xmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(tmp3_mean,
tmp3_m2, tmp3_weight, 1)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5_tmp[:, None]
tl.store(out_ptr1 + x0, tmp3, xmask)
x2 = xindex % 64
tmp15 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp6 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp17 = tl.load(in_ptr3 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp7 = tmp6 - tmp3
tmp8 = 4096.0
tmp9 = tmp4 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp7 * tmp12
tmp14 = tmp13 * tmp0
tmp16 = tmp14 + tmp15
tmp18 = tmp16 + tmp17
tl.store(out_ptr3 + (r1 + 4096 * x0), tmp18, rmask & xmask)
tmp19 = 4096.0
tmp20 = tmp4 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr4 + x0, tmp23, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64,), (1,))
assert_size_stride(primals_5, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_6, (64,), (1,))
assert_size_stride(primals_7, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = empty_strided_cuda((256,), (1,), torch.float32)
        buf2 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf6 = empty_strided_cuda((1, 256, 64, 64), (1048576, 4096, 64, 1),
torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 64, 64, 64), (262144, 4096, 64,
1), 0)
del buf6
        buf5 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
get_raw_stream(0)
        triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0[grid(256)](
            buf7, primals_3, buf0, primals_4, buf1, buf2, buf5, 256, 4096,
            XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del primals_3
del primals_4
buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf9 = empty_strided_cuda((256,), (1,), torch.float32)
buf10 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf14 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
buf13 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_red_fused__native_batch_norm_legit_add_repeat_1[grid(256)](
primals_6, buf8, primals_7, primals_1, buf9, buf10, buf14,
buf13, 256, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
del primals_6
del primals_7
return (buf14, primals_1, primals_2, primals_5, buf0, buf1,
reinterpret_tensor(buf5, (256,), (1,), 0), buf7, buf8, buf9,
reinterpret_tensor(buf13, (256,), (1,), 0), reinterpret_tensor(
buf10, (1, 256, 1, 1), (256, 1, 1, 1), 0), reinterpret_tensor(buf2,
(1, 256, 1, 1), (256, 1, 1, 1), 0))
class _Residual_BlockNew(nn.Module):
def __init__(self):
super(_Residual_BlockNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in1 = nn.InstanceNorm2d(64, affine=True)
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in2 = nn.InstanceNorm2d(64, affine=True)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.in1.weight
primals_4 = self.in1.bias
primals_5 = self.conv2.weight
primals_6 = self.in2.weight
primals_7 = self.in2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| Shandilya21/Improved-Optimization-Tecniques-for-Super-Resoultion-in-Images | _Residual_Block | false | 17,902 | ["MIT"] | 10 | d903d99706f557d74e00d4395e7d316172a9f7ee | https://github.com/Shandilya21/Improved-Optimization-Tecniques-for-Super-Resoultion-in-Images/tree/d903d99706f557d74e00d4395e7d316172a9f7ee |
DyIntraModalityUpdate | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/jb/cjbnptluutsevmac5ntrsg5tnheqrel6hsdkhljdxrupyiz7nuqy.py
# Topologically Sorted Source Nodes: [sum_1, v_mean], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# sum_1 => sum_1
# v_mean => div
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [1]), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 4), kwargs = {})
triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
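# The kernel above fuses the sum over dim 1 with the division, computing the
# mean of a contiguous (4, 4, 4) input in a single pass. In eager PyTorch the
# same computation is roughly:
#   v_mean = v.sum(dim=1) / 4   # -> shape (4, 4)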
# kernel path: runs/run_shard_2/inductor_cache/3o/c3o6gsdux62rhopxjmqheu5ae4je7kraaxyuqmnmily7cbh2qzom.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
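# The kernel above fuses the linear layer's bias add (indexed by x0 = idx % 12)
# with the ReLU, updating in_out_ptr0 in place, and in the same pass saves the
# (activation <= 0) mask that the threshold_backward op needs during the
# backward pass.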
# kernel path: runs/run_shard_2/inductor_cache/rn/crnlofhsrn34yrzpi3yq4ssri4pefp3ncf3mezkoveybthzsl25c.py
# Topologically Sorted Source Nodes: [add, gated_v_query, gated_v_key, gated_v_val], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
# add => add
# gated_v_key => mul_1
# gated_v_query => mul
# gated_v_val => mul_2
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_1, 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %getitem), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %getitem_2), kwargs = {})
triton_poi_fused_add_mul_2 = async_compile.triton('triton_poi_fused_add_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = (xindex // 4)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4 + x0 + (12*x3)), xmask)
tmp6 = tl.load(in_ptr1 + (x0 + (12*x3)), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + (12*x3)), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp3 * tmp6
tmp9 = tmp3 * tmp8
tl.store(out_ptr0 + (x4), tmp5, xmask)
tl.store(out_ptr1 + (x4), tmp7, xmask)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
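# The kernel above fuses the gating path: it applies the sigmoid to the gate
# logits, adds 1, and scales the query, key, and value slices of the packed
# (4, 4, 12) projection by the broadcast (1 + sigmoid(gate)) factor, writing
# three separate (4, 4, 4) outputs in one pass.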
# kernel path: runs/run_shard_2/inductor_cache/wj/cwjd35o2nbar5uat5nbemgsct5x6hpw74gha2u7llss4zgezvg5t.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul => bmm
# Graph fragment:
# %bmm : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand, %expand_1), kwargs = {})
triton_poi_fused_bmm_3 = async_compile.triton('triton_poi_fused_bmm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
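# With output_size=4 and num_head=4 each attention head is a single channel,
# so the kernel above just gathers one strided column (element stride 4,
# offset 0) into a contiguous (4, 4, 1) buffer for the extern bmm call; the
# bmm_5/7/9 variants below differ only in the column offset (1, 2, 3).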
# kernel path: runs/run_shard_2/inductor_cache/re/cre7w75rug2us4dh57l67bx5rwo3jz6peysf4en72be5wnd6jfnk.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp
# Graph fragment:
# %mul_tensor_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default_7 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_7, [2], True), kwargs = {})
# %sub_tensor_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_7, %amax_default_7), kwargs = {})
# %div_tensor_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_7, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_7,), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
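# The kernel above computes only the numerically stable softmax numerator
# exp(x - max(x, dim=2)); the division by the row sum is deferred and fused
# into the cat kernels below, so no separate normalization pass is emitted.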
# kernel path: runs/run_shard_2/inductor_cache/ws/cwsjjvqyab6dnbgvlapznnpbpik6q6whfdh34725bhmfoo7lmth5.py
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_2 => bmm_2
# Graph fragment:
# %bmm_2 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_4, %expand_5), kwargs = {})
triton_poi_fused_bmm_5 = async_compile.triton('triton_poi_fused_bmm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/52/c52gx67b7tyuyq6gzoacxctm2mahk6um5i2q5qzsylyrloycjtzf.py
# Topologically Sorted Source Nodes: [v_update_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# v_update_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sum_5, %sum_9], 2), kwargs = {})
triton_poi_fused_cat_6 = async_compile.triton('triton_poi_fused_cat_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = (xindex // 2)
x2 = (xindex // 8)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4*x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp5 / tmp11
tmp13 = tl.load(in_ptr1 + (16*x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp6 / tmp11
tmp16 = tl.load(in_ptr1 + (4 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp8 / tmp11
tmp20 = tl.load(in_ptr1 + (8 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp10 / tmp11
tmp24 = tl.load(in_ptr1 + (12 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tmp30 = tl.full([1], 2, tl.int64)
tmp31 = tmp0 < tmp30
tmp32 = tl.load(in_ptr2 + (4*x3), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (1 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.load(in_ptr2 + (2 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr2 + (3 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp32 / tmp38
tmp40 = tl.load(in_ptr1 + (1 + (16*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp33 / tmp38
tmp43 = tl.load(in_ptr1 + (5 + (16*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp35 / tmp38
tmp47 = tl.load(in_ptr1 + (9 + (16*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp37 / tmp38
tmp51 = tl.load(in_ptr1 + (13 + (16*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp29, tmp53, tmp54)
tmp56 = tl.where(tmp4, tmp28, tmp55)
tl.store(out_ptr0 + (x5), tmp56, xmask)
''', device_str='cuda')
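# The kernel above finishes the softmax for heads 0 and 1 (dividing each exp
# numerator by its row sum), takes the attention-weighted sum over the value
# vectors, and concatenates the two head outputs into a (4, 4, 2) buffer --
# the first two head iterations of the update loop in the source module,
# run once for v and once for q.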
# kernel path: runs/run_shard_2/inductor_cache/tq/ctqc7xv7muldsujo5f3akjbneattuuq3ahd6elotd3sifjkkpqop.py
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_4 => bmm_4
# Graph fragment:
# %bmm_4 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_8, %expand_9), kwargs = {})
triton_poi_fused_bmm_7 = async_compile.triton('triton_poi_fused_bmm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/x7/cx77meftt2lg5lziumjwcvx6mo7bmqz6hljy4wreluegyll4z642.py
# Topologically Sorted Source Nodes: [v_update_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# v_update_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %sum_13], 2), kwargs = {})
triton_poi_fused_cat_8 = async_compile.triton('triton_poi_fused_cat_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = (xindex // 3)
x2 = (xindex // 12)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((2*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (4*x3), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (2 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (6 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (10 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (14 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + (x5), tmp33, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ou/couf67lchlspm7jcfqisjif3l3qapi5eprdqus5e32holjtsqni4.py
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_6 => bmm_6
# Graph fragment:
# %bmm_6 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_12, %expand_13), kwargs = {})
triton_poi_fused_bmm_9 = async_compile.triton('triton_poi_fused_bmm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/by/cbytl4u655pkbts2a2ebnj523uuzrxpksueemxh7wivafqvkli52.py
# Topologically Sorted Source Nodes: [v_update_3, add_6], Original ATen: [aten.cat, aten.add]
# Source node to ATen node mapping:
# add_6 => add_6
# v_update_3 => cat_4
# Graph fragment:
# %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_2, %sum_17], 2), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %cat_4), kwargs = {})
triton_poi_fused_add_cat_10 = async_compile.triton('triton_poi_fused_add_cat_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cat_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = (xindex // 4)
x2 = (xindex // 16)
x3 = xindex
tmp34 = tl.load(in_ptr3 + (x3), xmask)
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((3*x4) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (4*x4), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + (4*x4)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + (4*x4)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + (4*x4)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (3 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (7 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (11 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (15 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tmp35 = tmp34 + tmp33
tl.store(in_out_ptr0 + (x3), tmp35, xmask)
''', device_str='cuda')
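# The kernel above appends the final head's weighted sum to the running
# concatenation and fuses the residual connection (primals_1 + v_update,
# and likewise primals_2 + q_update on its second invocation) into the same
# pass, updating in_out_ptr0 in place.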
# kernel path: runs/run_shard_2/inductor_cache/wu/cwudwdjlt7z3vrijwwnpgpwpeh5ilbdurjvdb6y6g3a6jzrcazmb.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_7 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_29,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_relu_threshold_backward_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_11(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (12, 4), (4, 1))
assert_size_stride(primals_8, (12, ), (1, ))
assert_size_stride(primals_9, (12, 4), (4, 1))
assert_size_stride(primals_10, (12, ), (1, ))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, v_mean], Original ATen: [aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sum_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_2, q_mean], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_0.run(primals_2, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_3
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
del primals_6
buf4 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 12), (1, 4), 0), out=buf4)
del primals_7
buf5 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 12), (1, 4), 0), out=buf5)
del primals_9
buf6 = reinterpret_tensor(buf4, (4, 4, 12), (48, 12, 1), 0); del buf4 # reuse
buf61 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf6, primals_8, buf61, 192, grid=grid(192), stream=stream0)
del primals_8
buf7 = reinterpret_tensor(buf5, (4, 4, 12), (48, 12, 1), 0); del buf5 # reuse
buf60 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf7, primals_10, buf60, 192, grid=grid(192), stream=stream0)
del primals_10
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, gated_v_query, gated_v_key, gated_v_val], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_2.run(buf3, buf6, buf8, buf9, buf20, 64, grid=grid(64), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf8, buf10, 16, grid=grid(16), stream=stream0)
buf11 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf9, buf11, 16, grid=grid(16), stream=stream0)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(buf10, buf11, out=buf12)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_3, gated_q_query, gated_q_key, gated_q_val], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_2.run(buf2, buf7, buf13, buf14, buf21, 64, grid=grid(64), stream=stream0)
buf15 = reinterpret_tensor(buf11, (4, 4, 1), (4, 1, 16), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf13, buf15, 16, grid=grid(16), stream=stream0)
buf16 = reinterpret_tensor(buf10, (4, 1, 4), (4, 16, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf14, buf16, 16, grid=grid(16), stream=stream0)
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf15, buf16, out=buf17)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf12, buf18, 64, grid=grid(64), stream=stream0)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf17, buf19, 64, grid=grid(64), stream=stream0)
buf22 = reinterpret_tensor(buf16, (4, 4, 1), (4, 1, 16), 0); del buf16 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
triton_poi_fused_bmm_5.run(buf8, buf22, 16, grid=grid(16), stream=stream0)
buf23 = reinterpret_tensor(buf15, (4, 1, 4), (4, 16, 1), 0); del buf15 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
triton_poi_fused_bmm_5.run(buf9, buf23, 16, grid=grid(16), stream=stream0)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
extern_kernels.bmm(buf22, buf23, out=buf24)
buf25 = reinterpret_tensor(buf23, (4, 4, 1), (4, 1, 16), 0); del buf23 # reuse
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
triton_poi_fused_bmm_5.run(buf13, buf25, 16, grid=grid(16), stream=stream0)
buf26 = reinterpret_tensor(buf22, (4, 1, 4), (4, 16, 1), 0); del buf22 # reuse
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
triton_poi_fused_bmm_5.run(buf14, buf26, 16, grid=grid(16), stream=stream0)
buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
extern_kernels.bmm(buf25, buf26, out=buf27)
buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf24, buf28, 64, grid=grid(64), stream=stream0)
buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf27, buf29, 64, grid=grid(64), stream=stream0)
buf30 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_update_1], Original ATen: [aten.cat]
triton_poi_fused_cat_6.run(buf18, buf20, buf28, buf30, 32, grid=grid(32), stream=stream0)
buf31 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_update_1], Original ATen: [aten.cat]
triton_poi_fused_cat_6.run(buf19, buf21, buf29, buf31, 32, grid=grid(32), stream=stream0)
buf32 = reinterpret_tensor(buf26, (4, 4, 1), (4, 1, 16), 0); del buf26 # reuse
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf8, buf32, 16, grid=grid(16), stream=stream0)
buf33 = reinterpret_tensor(buf25, (4, 1, 4), (4, 16, 1), 0); del buf25 # reuse
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf9, buf33, 16, grid=grid(16), stream=stream0)
buf34 = buf29; del buf29 # reuse
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
extern_kernels.bmm(buf32, buf33, out=buf34)
buf35 = reinterpret_tensor(buf33, (4, 4, 1), (4, 1, 16), 0); del buf33 # reuse
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf13, buf35, 16, grid=grid(16), stream=stream0)
buf36 = reinterpret_tensor(buf32, (4, 1, 4), (4, 16, 1), 0); del buf32 # reuse
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf14, buf36, 16, grid=grid(16), stream=stream0)
buf37 = buf19; del buf19 # reuse
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
extern_kernels.bmm(buf35, buf36, out=buf37)
buf38 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [softmax_4], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf34, buf38, 64, grid=grid(64), stream=stream0)
buf39 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [softmax_5], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf37, buf39, 64, grid=grid(64), stream=stream0)
buf40 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_update_2], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf30, buf38, buf20, buf40, 48, grid=grid(48), stream=stream0)
del buf30
buf41 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_update_2], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf31, buf39, buf21, buf41, 48, grid=grid(48), stream=stream0)
del buf31
buf42 = reinterpret_tensor(buf36, (4, 4, 1), (4, 1, 16), 0); del buf36 # reuse
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf8, buf42, 16, grid=grid(16), stream=stream0)
buf43 = reinterpret_tensor(buf35, (4, 1, 4), (4, 16, 1), 0); del buf35 # reuse
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf9, buf43, 16, grid=grid(16), stream=stream0)
buf44 = buf39; del buf39 # reuse
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
extern_kernels.bmm(buf42, buf43, out=buf44)
buf45 = reinterpret_tensor(buf43, (4, 4, 1), (4, 1, 16), 0); del buf43 # reuse
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf13, buf45, 16, grid=grid(16), stream=stream0)
buf46 = reinterpret_tensor(buf42, (4, 1, 4), (4, 16, 1), 0); del buf42 # reuse
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf14, buf46, 16, grid=grid(16), stream=stream0)
buf47 = buf38; del buf38 # reuse
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
extern_kernels.bmm(buf45, buf46, out=buf47)
del buf45
del buf46
buf48 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_6], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf44, buf48, 64, grid=grid(64), stream=stream0)
buf49 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_7], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf47, buf49, 64, grid=grid(64), stream=stream0)
buf50 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf52 = buf50; del buf50 # reuse
# Topologically Sorted Source Nodes: [v_update_3, add_6], Original ATen: [aten.cat, aten.add]
triton_poi_fused_add_cat_10.run(buf52, buf40, buf48, buf20, primals_1, 64, grid=grid(64), stream=stream0)
del buf40
buf51 = buf48; del buf48 # reuse
buf55 = buf51; del buf51 # reuse
# Topologically Sorted Source Nodes: [q_update_3, add_7], Original ATen: [aten.cat, aten.add]
triton_poi_fused_add_cat_10.run(buf55, buf41, buf49, buf21, primals_2, 64, grid=grid(64), stream=stream0)
del buf41
buf53 = reinterpret_tensor(buf49, (16, 4), (4, 1), 0); del buf49 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf52, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf53)
buf54 = reinterpret_tensor(buf53, (4, 4, 4), (16, 4, 1), 0); del buf53 # reuse
buf59 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_11.run(buf54, primals_12, buf59, 64, grid=grid(64), stream=stream0)
del primals_12
buf56 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf55, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf56)
buf57 = reinterpret_tensor(buf56, (4, 4, 4), (16, 4, 1), 0); del buf56 # reuse
buf58 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_11.run(buf57, primals_14, buf58, 64, grid=grid(64), stream=stream0)
del primals_14
return (buf54, buf57, buf0, buf1, buf2, buf3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(buf6, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf6, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf6, (4, 4, 4), (48, 12, 1), 8), reinterpret_tensor(buf7, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf7, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf7, (4, 4, 4), (48, 12, 1), 8), buf12, buf17, reinterpret_tensor(buf20, (4, 1, 4, 1), (16, 16, 4, 1), 0), reinterpret_tensor(buf21, (4, 1, 4, 1), (16, 16, 4, 1), 0), buf24, buf27, reinterpret_tensor(buf20, (4, 1, 4, 1), (16, 16, 4, 1), 1), reinterpret_tensor(buf21, (4, 1, 4, 1), (16, 16, 4, 1), 1), buf34, buf37, reinterpret_tensor(buf20, (4, 1, 4, 1), (16, 16, 4, 1), 2), reinterpret_tensor(buf21, (4, 1, 4, 1), (16, 16, 4, 1), 2), buf44, buf47, reinterpret_tensor(buf20, (4, 1, 4, 1), (16, 16, 4, 1), 3), reinterpret_tensor(buf21, (4, 1, 4, 1), (16, 16, 4, 1), 3), reinterpret_tensor(buf52, (16, 4), (4, 1), 0), reinterpret_tensor(buf55, (16, 4), (4, 1), 0), buf58, primals_13, buf59, primals_11, reinterpret_tensor(buf13, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf14, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf8, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf9, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf13, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf14, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf8, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf9, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf13, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf14, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf8, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf9, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf13, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf14, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf8, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf9, (4, 4, 1), (16, 4, 1), 0), buf60, buf61, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
        self.activate = activate.lower() if activate is not None else None
        # Compare against the lowercased value so mixed-case names such as
        # 'ReLU' still select an activation instead of leaving ac_fn unset.
        if self.activate == 'relu':
            self.ac_fn = nn.ReLU()
        elif self.activate == 'sigmoid':
            self.ac_fn = nn.Sigmoid()
        elif self.activate == 'tanh':
            self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class DyIntraModalityUpdate(nn.Module):
"""
Dynamic Intra-Modality Attention Flow
"""
def __init__(self, v_size, q_size, output_size, num_head, drop=0.0):
super(DyIntraModalityUpdate, self).__init__()
self.v_size = v_size
self.q_size = q_size
self.output_size = output_size
self.num_head = num_head
self.v4q_gate_lin = FCNet(v_size, output_size, drop=drop)
self.q4v_gate_lin = FCNet(q_size, output_size, drop=drop)
self.v_lin = FCNet(v_size, output_size * 3, drop=drop, activate='relu')
self.q_lin = FCNet(q_size, output_size * 3, drop=drop, activate='relu')
        self.v_output = FCNet(output_size, output_size, drop=drop,
            activate='relu')
        self.q_output = FCNet(output_size, output_size, drop=drop,
            activate='relu')
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.sigmoid = nn.Sigmoid()
def forward(self, v, q):
"""
:param v: [batch_size, num_obj, feature_size]
:param q: [batch_size, max_len, feature_size]
:return:
"""
_batch_size, num_obj = v.shape[0], v.shape[1]
max_len = q.shape[1]
v_mean = v.sum(1) / num_obj
q_mean = q.sum(1) / max_len
v4q_gate = self.sigmoid(self.v4q_gate_lin(v_mean)).unsqueeze(1)
q4v_gate = self.sigmoid(self.q4v_gate_lin(q_mean)).unsqueeze(1)
v_tran = self.v_lin(v)
q_tran = self.q_lin(q)
v_key, v_query, v_val = torch.split(v_tran, v_tran.size(2) // 3, dim=2)
q_key, q_query, q_val = torch.split(q_tran, q_tran.size(2) // 3, dim=2)
gated_v_query = (1 + q4v_gate) * v_query
gated_v_key = (1 + q4v_gate) * v_key
gated_v_val = (1 + q4v_gate) * v_val
gated_q_query = (1 + v4q_gate) * q_query
gated_q_key = (1 + v4q_gate) * q_key
gated_q_val = (1 + v4q_gate) * q_val
        head_size = self.output_size // self.num_head
        v_key_set = torch.split(gated_v_key, head_size, dim=2)
        v_query_set = torch.split(gated_v_query, head_size, dim=2)
        v_val_set = torch.split(gated_v_val, head_size, dim=2)
        q_key_set = torch.split(gated_q_key, head_size, dim=2)
        q_query_set = torch.split(gated_q_query, head_size, dim=2)
        q_val_set = torch.split(gated_q_val, head_size, dim=2)
        for i in range(self.num_head):
            v_key_slice = v_key_set[i]
            v_query_slice = v_query_set[i]
            v_val_slice = v_val_set[i]
            q_key_slice = q_key_set[i]
            q_query_slice = q_query_set[i]
            q_val_slice = q_val_set[i]
            v2v = v_query_slice @ v_key_slice.transpose(1, 2) / head_size ** 0.5
            q2q = q_query_slice @ q_key_slice.transpose(1, 2) / head_size ** 0.5
            dyIntranMAF_v2v = F.softmax(v2v, dim=2).unsqueeze(3)
            dyIntranMAF_q2q = F.softmax(q2q, dim=2).unsqueeze(3)
            v_head = (dyIntranMAF_v2v * v_val_slice.unsqueeze(1)).sum(2)
            q_head = (dyIntranMAF_q2q * q_val_slice.unsqueeze(1)).sum(2)
            v_update = v_head if i == 0 else torch.cat((v_update, v_head),
                dim=2)
            q_update = q_head if i == 0 else torch.cat((q_update, q_head),
                dim=2)
updated_v = self.v_output(v + v_update)
updated_q = self.q_output(q + q_update)
return updated_v, updated_q
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'v_size': 4, 'q_size': 4, 'output_size': 4, 'num_head': 4}]
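# A minimal usage sketch (added for illustration; not part of the original
# repo): build the module with the test configuration above and run one
# forward pass.
if __name__ == "__main__":
    model = DyIntraModalityUpdate(v_size=4, q_size=4, output_size=4,
        num_head=4)
    v, q = get_inputs()
    updated_v, updated_q = model(v, q)
    print(updated_v.shape, updated_q.shape)  # torch.Size([4, 4, 4]) for both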
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
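# Editorial note: the kernel above fuses the v.sum(1) / num_obj (and
# q.sum(1) / max_len) means from forward(): four strided loads are summed and
# scaled by 0.25, which is valid because num_obj == max_len == 4 in this trace.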
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr1 + (4 + x0 + 12 * x3), xmask)
tmp6 = tl.load(in_ptr1 + (x0 + 12 * x3), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + 12 * x3), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp3 * tmp6
tmp9 = tmp3 * tmp8
tl.store(out_ptr0 + x4, tmp5, xmask)
tl.store(out_ptr1 + x4, tmp7, xmask)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_bmm_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = xindex // 2
x2 = xindex // 8
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x3, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp5 / tmp11
tmp13 = tl.load(in_ptr1 + 16 * x2, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp6 / tmp11
tmp16 = tl.load(in_ptr1 + (4 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp8 / tmp11
tmp20 = tl.load(in_ptr1 + (8 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp10 / tmp11
tmp24 = tl.load(in_ptr1 + (12 + 16 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp32 = tl.load(in_ptr2 + 4 * x3, tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (1 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.load(in_ptr2 + (2 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr2 + (3 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp32 / tmp38
tmp40 = tl.load(in_ptr1 + (1 + 16 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp33 / tmp38
tmp43 = tl.load(in_ptr1 + (5 + 16 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp35 / tmp38
tmp47 = tl.load(in_ptr1 + (9 + 16 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp37 / tmp38
tmp51 = tl.load(in_ptr1 + (13 + 16 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp29, tmp53, tmp54)
tmp56 = tl.where(tmp4, tmp28, tmp55)
tl.store(out_ptr0 + x5, tmp56, xmask)
@triton.jit
def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = xindex // 3
x2 = xindex // 12
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 3, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x3, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (2 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (6 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (10 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (14 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + x5, tmp33, xmask)
@triton.jit
def triton_poi_fused_bmm_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_cat_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex // 4
x2 = xindex // 16
x3 = xindex
tmp34 = tl.load(in_ptr3 + x3, xmask)
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (3 * x4 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x4, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x4), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x4), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x4), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (3 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (7 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (11 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (15 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tmp35 = tmp34 + tmp33
tl.store(in_out_ptr0 + x3, tmp35, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_11(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (12, 4), (4, 1))
assert_size_stride(primals_8, (12,), (1,))
assert_size_stride(primals_9, (12, 4), (4, 1))
assert_size_stride(primals_10, (12,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sum_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_sum_0[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_3
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
del primals_6
buf4 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 12), (1, 4), 0), out=buf4)
del primals_7
buf5 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 12), (1, 4), 0), out=buf5)
del primals_9
buf6 = reinterpret_tensor(buf4, (4, 4, 12), (48, 12, 1), 0)
del buf4
buf61 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(192)](buf6,
primals_8, buf61, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_8
buf7 = reinterpret_tensor(buf5, (4, 4, 12), (48, 12, 1), 0)
del buf5
buf60 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(192)](buf7,
primals_10, buf60, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_10
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_2[grid(64)](buf3, buf6, buf8, buf9, buf20,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf8, buf10, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf9, buf11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf10, buf11, out=buf12)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_2[grid(64)](buf2, buf7, buf13, buf14,
buf21, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf11, (4, 4, 1), (4, 1, 16), 0)
del buf11
triton_poi_fused_bmm_3[grid(16)](buf13, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf16 = reinterpret_tensor(buf10, (4, 1, 4), (4, 16, 1), 0)
del buf10
triton_poi_fused_bmm_3[grid(16)](buf14, buf16, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf15, buf16, out=buf17)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf12, buf18, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf17, buf19, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf22 = reinterpret_tensor(buf16, (4, 4, 1), (4, 1, 16), 0)
del buf16
triton_poi_fused_bmm_5[grid(16)](buf8, buf22, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf23 = reinterpret_tensor(buf15, (4, 1, 4), (4, 16, 1), 0)
del buf15
triton_poi_fused_bmm_5[grid(16)](buf9, buf23, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf22, buf23, out=buf24)
buf25 = reinterpret_tensor(buf23, (4, 4, 1), (4, 1, 16), 0)
del buf23
triton_poi_fused_bmm_5[grid(16)](buf13, buf25, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf26 = reinterpret_tensor(buf22, (4, 1, 4), (4, 16, 1), 0)
del buf22
triton_poi_fused_bmm_5[grid(16)](buf14, buf26, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf25, buf26, out=buf27)
buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf24, buf28, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf27, buf29, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_6[grid(32)](buf18, buf20, buf28, buf30, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf31 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_6[grid(32)](buf19, buf21, buf29, buf31, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf32 = reinterpret_tensor(buf26, (4, 4, 1), (4, 1, 16), 0)
del buf26
triton_poi_fused_bmm_7[grid(16)](buf8, buf32, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf33 = reinterpret_tensor(buf25, (4, 1, 4), (4, 16, 1), 0)
del buf25
triton_poi_fused_bmm_7[grid(16)](buf9, buf33, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf34 = buf29
del buf29
extern_kernels.bmm(buf32, buf33, out=buf34)
buf35 = reinterpret_tensor(buf33, (4, 4, 1), (4, 1, 16), 0)
del buf33
triton_poi_fused_bmm_7[grid(16)](buf13, buf35, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf36 = reinterpret_tensor(buf32, (4, 1, 4), (4, 16, 1), 0)
del buf32
triton_poi_fused_bmm_7[grid(16)](buf14, buf36, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf37 = buf19
del buf19
extern_kernels.bmm(buf35, buf36, out=buf37)
buf38 = buf28
del buf28
triton_poi_fused__softmax_4[grid(64)](buf34, buf38, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf39 = buf18
del buf18
triton_poi_fused__softmax_4[grid(64)](buf37, buf39, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf40 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
triton_poi_fused_cat_8[grid(48)](buf30, buf38, buf20, buf40, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del buf30
buf41 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
triton_poi_fused_cat_8[grid(48)](buf31, buf39, buf21, buf41, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del buf31
buf42 = reinterpret_tensor(buf36, (4, 4, 1), (4, 1, 16), 0)
del buf36
triton_poi_fused_bmm_9[grid(16)](buf8, buf42, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf43 = reinterpret_tensor(buf35, (4, 1, 4), (4, 16, 1), 0)
del buf35
triton_poi_fused_bmm_9[grid(16)](buf9, buf43, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf44 = buf39
del buf39
extern_kernels.bmm(buf42, buf43, out=buf44)
buf45 = reinterpret_tensor(buf43, (4, 4, 1), (4, 1, 16), 0)
del buf43
triton_poi_fused_bmm_9[grid(16)](buf13, buf45, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf46 = reinterpret_tensor(buf42, (4, 1, 4), (4, 16, 1), 0)
del buf42
triton_poi_fused_bmm_9[grid(16)](buf14, buf46, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf47 = buf38
del buf38
extern_kernels.bmm(buf45, buf46, out=buf47)
del buf45
del buf46
buf48 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf44, buf48, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf49 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf47, buf49, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf50 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf52 = buf50
del buf50
triton_poi_fused_add_cat_10[grid(64)](buf52, buf40, buf48, buf20,
primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf40
buf51 = buf48
del buf48
buf55 = buf51
del buf51
triton_poi_fused_add_cat_10[grid(64)](buf55, buf41, buf49, buf21,
primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf41
buf53 = reinterpret_tensor(buf49, (16, 4), (4, 1), 0)
del buf49
extern_kernels.mm(reinterpret_tensor(buf52, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf53)
buf54 = reinterpret_tensor(buf53, (4, 4, 4), (16, 4, 1), 0)
del buf53
buf59 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_11[grid(64)](buf54,
primals_12, buf59, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
buf56 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf55, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf56)
buf57 = reinterpret_tensor(buf56, (4, 4, 4), (16, 4, 1), 0)
del buf56
buf58 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_11[grid(64)](buf57,
primals_14, buf58, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_14
return buf54, buf57, buf0, buf1, buf2, buf3, reinterpret_tensor(primals_1,
(16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(buf6, (4, 4, 4), (48, 12, 1), 0
), reinterpret_tensor(buf6, (4, 4, 4), (48, 12, 1), 4
), reinterpret_tensor(buf6, (4, 4, 4), (48, 12, 1), 8
), reinterpret_tensor(buf7, (4, 4, 4), (48, 12, 1), 0
), reinterpret_tensor(buf7, (4, 4, 4), (48, 12, 1), 4
), reinterpret_tensor(buf7, (4, 4, 4), (48, 12, 1), 8
), buf12, buf17, reinterpret_tensor(buf20, (4, 1, 4, 1), (16, 16, 4,
1), 0), reinterpret_tensor(buf21, (4, 1, 4, 1), (16, 16, 4, 1), 0
), buf24, buf27, reinterpret_tensor(buf20, (4, 1, 4, 1), (16, 16, 4,
1), 1), reinterpret_tensor(buf21, (4, 1, 4, 1), (16, 16, 4, 1), 1
), buf34, buf37, reinterpret_tensor(buf20, (4, 1, 4, 1), (16, 16, 4,
1), 2), reinterpret_tensor(buf21, (4, 1, 4, 1), (16, 16, 4, 1), 2
), buf44, buf47, reinterpret_tensor(buf20, (4, 1, 4, 1), (16, 16, 4,
1), 3), reinterpret_tensor(buf21, (4, 1, 4, 1), (16, 16, 4, 1), 3
), reinterpret_tensor(buf52, (16, 4), (4, 1), 0), reinterpret_tensor(
buf55, (16, 4), (4, 1), 0
), buf58, primals_13, buf59, primals_11, reinterpret_tensor(buf13,
(4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf14, (4, 4, 1), (16,
4, 1), 3), reinterpret_tensor(buf8, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf9, (4, 4, 1), (16, 4, 1), 3
), reinterpret_tensor(buf13, (4, 1, 4), (16, 1, 4), 2
), reinterpret_tensor(buf14, (4, 4, 1), (16, 4, 1), 2
), reinterpret_tensor(buf8, (4, 1, 4), (16, 1, 4), 2
), reinterpret_tensor(buf9, (4, 4, 1), (16, 4, 1), 2
), reinterpret_tensor(buf13, (4, 1, 4), (16, 1, 4), 1
), reinterpret_tensor(buf14, (4, 4, 1), (16, 4, 1), 1
), reinterpret_tensor(buf8, (4, 1, 4), (16, 1, 4), 1
), reinterpret_tensor(buf9, (4, 4, 1), (16, 4, 1), 1
), reinterpret_tensor(buf13, (4, 1, 4), (16, 1, 4), 0
), reinterpret_tensor(buf14, (4, 4, 1), (16, 4, 1), 0
), reinterpret_tensor(buf8, (4, 1, 4), (16, 1, 4), 0
), reinterpret_tensor(buf9, (4, 4, 1), (16, 4, 1), 0), buf60, buf61
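# Editorial note: only the first two tensors (buf54, buf57) are the module's
# outputs; the remaining returns are intermediates that this AOT forward graph
# ('0_forward') saves for the backward pass, e.g. the relu masks buf58-buf61
# emitted by the *_threshold_backward kernels above.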
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
        self.activate = activate.lower() if activate is not None else None
        # Dispatch on the lowercased name so mixed-case inputs (e.g. 'ReLU')
        # still get an activation; forward() calls self.ac_fn whenever
        # self.activate is set.
        if self.activate == 'relu':
            self.ac_fn = nn.ReLU()
        elif self.activate == 'sigmoid':
            self.ac_fn = nn.Sigmoid()
        elif self.activate == 'tanh':
            self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class DyIntraModalityUpdateNew(nn.Module):
"""
Dynamic Intra-Modality Attention Flow
"""
def __init__(self, v_size, q_size, output_size, num_head, drop=0.0):
super(DyIntraModalityUpdateNew, self).__init__()
self.v_size = v_size
self.q_size = q_size
self.output_size = output_size
self.num_head = num_head
self.v4q_gate_lin = FCNet(v_size, output_size, drop=drop)
self.q4v_gate_lin = FCNet(q_size, output_size, drop=drop)
self.v_lin = FCNet(v_size, output_size * 3, drop=drop, activate='relu')
self.q_lin = FCNet(q_size, output_size * 3, drop=drop, activate='relu')
        self.v_output = FCNet(output_size, output_size, drop=drop, activate='relu')
        self.q_output = FCNet(output_size, output_size, drop=drop, activate='relu')
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.sigmoid = nn.Sigmoid()
def forward(self, input_0, input_1):
primals_3 = self.v4q_gate_lin.lin.weight
primals_4 = self.v4q_gate_lin.lin.bias
primals_5 = self.q4v_gate_lin.lin.weight
primals_6 = self.q4v_gate_lin.lin.bias
primals_7 = self.v_lin.lin.weight
primals_8 = self.v_lin.lin.bias
primals_9 = self.q_lin.lin.weight
primals_10 = self.q_lin.lin.bias
primals_11 = self.v_output.lin.weight
primals_12 = self.v_output.lin.bias
primals_13 = self.q_output.lin.weight
primals_14 = self.q_output.lin.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1]
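if __name__ == '__main__' and torch.cuda.is_available():
    # Editorial usage sketch (an assumption; mirrors get_inputs() and
    # get_init_inputs() from the eager version above). The compiled module is
    # intended as a drop-in replacement for DyIntraModalityUpdate.
    model = DyIntraModalityUpdateNew(v_size=4, q_size=4, output_size=4,
        num_head=4).cuda()
    v = torch.rand(4, 4, 4, device='cuda')
    q = torch.rand(4, 4, 4, device='cuda')
    updated_v, updated_q = model(v, q)
    print(updated_v.shape, updated_q.shape)  # torch.Size([4, 4, 4]) twice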
| Ruiver/CTCNet | DyIntraModalityUpdate | false | 17,903 | [
"Apache-2.0"
] | 6 | 539e55ec9fed06028379d35dfd5cd4074755ffd8 | https://github.com/Ruiver/CTCNet/tree/539e55ec9fed06028379d35dfd5cd4074755ffd8 |
ResnetDecoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/rk/crk3wo355zdwinxertbphzehdv47fbdvcqn3ltnkzddbyyox3kvo.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%unsqueeze, [-1, -2], True), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
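# Editorial note: the kernel above implements AdaptiveAvgPool1d((1,)) for this
# fixed-size trace: each output element is the sum of four strided loads
# divided by 4.0, so no reduction loop is needed.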
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
return (buf1, reinterpret_tensor(buf0, (4, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ResnetDecoder(nn.Module):
"""
    This class represents the tail of ResNet. It applies a global average pooling and
    maps the pooled features to class scores with a fully connected layer.
"""
def __init__(self, in_features, n_classes):
super().__init__()
self.avg = nn.AdaptiveAvgPool1d((1,))
self.decoder = nn.Linear(in_features, n_classes)
def forward(self, x):
x = self.avg(x)
x = x.view(x.size(0), -1)
x = self.decoder(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'n_classes': 4}]
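# --- Editorial sketch (not part of the original file) -------------------------
# Shape walk-through, assuming the [batch, in_features, length] layout implied
# by get_inputs(): AdaptiveAvgPool1d((1,)) reduces the last dimension to its
# mean, so the decoder sees a [batch, in_features] matrix.
if __name__ == '__main__':
    m = ResnetDecoder(in_features=4, n_classes=4)
    x = torch.rand(4, 4, 4)
    assert torch.allclose(m.avg(x).squeeze(-1), x.mean(dim=-1))
    print(m(x).shape)  # torch.Size([4, 4])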
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (4, 4), (4,
1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf1)
del primals_2
del primals_3
return buf1, reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
class ResnetDecoderNew(nn.Module):
"""
    This class represents the tail of ResNet. It applies a global average pooling and
    maps the pooled features to class scores with a fully connected layer.
"""
def __init__(self, in_features, n_classes):
super().__init__()
self.avg = nn.AdaptiveAvgPool1d((1,))
self.decoder = nn.Linear(in_features, n_classes)
def forward(self, input_0):
primals_2 = self.decoder.weight
primals_3 = self.decoder.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
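if __name__ == '__main__' and torch.cuda.is_available():
    # Editorial equivalence sketch (an assumption): the fused mean kernel plus
    # the extern addmm above should reproduce AdaptiveAvgPool1d((1,)) followed
    # by the linear decoder.
    compiled = ResnetDecoderNew(in_features=4, n_classes=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    eager = torch.nn.functional.linear(x.mean(dim=-1), compiled.decoder.weight,
        compiled.decoder.bias)
    print(torch.allclose(compiled(x), eager, atol=1e-6))  # expected: True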
| SeffyVon/ECG_MICResNet | ResnetDecoder | false | 17,904 | [
"BSD-3-Clause"
] | 5 | 8c6a319b5822ddfb130738eb1d9cdc3c21b24209 | https://github.com/SeffyVon/ECG_MICResNet/tree/8c6a319b5822ddfb130738eb1d9cdc3c21b24209 |
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/nd/cndrb2by2g3ednlxsjtytxjhjdm7bdlrgvzzodhsxvxnpml2di7p.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yf/cyf4onw64kisylvpovi56es47z2xblpjc7lad7245qvttvjjn43d.py
# Topologically Sorted Source Nodes: [conv2d_4, out_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# out_4 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/sz/cszej4maagr3mbrr632toipwysq33ntyrkwik4rqx6547nec6w3b.py
# Topologically Sorted Source Nodes: [conv2d_5, out_5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_5 => convolution_5
# out_5 => relu_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + (x0), tmp5, None)
tl.store(out_ptr0 + (x0), tmp7, None)
''', device_str='cuda')
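# Editorial note: besides the in-place bias-add + ReLU, this kernel writes a
# boolean mask of the clamped positions to out_ptr0; autograd later consumes
# that mask as the threshold_backward input when computing gradients.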
# kernel path: runs/run_shard_2/inductor_cache/fa/cfautuql4pr724m35hmackl27g6tyaili7gj5pm3hpkjeaj5qeam.py
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_7 => convolution_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_1, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (64, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (1, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (1, ), (1, ))
assert_size_stride(primals_14, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_15, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, out_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf5, primals_7, 1048576, grid=grid(1048576), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, out_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf7, primals_9, 1048576, grid=grid(1048576), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, out_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf9, primals_11, 524288, grid=grid(524288), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf11 = reinterpret_tensor(buf10, (4, 1, 64, 64), (4096, 1, 64, 1), 0); del buf10 # reuse
buf14 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_5, out_5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_2.run(buf11, primals_13, buf14, 16384, grid=grid(16384), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 1, 64, 64), (4096, 0, 64, 1), 0), primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf13, primals_15, 16384, grid=grid(16384), stream=stream0)
del primals_15
return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf3, buf5, buf7, buf9, reinterpret_tensor(buf11, (4, 1, 64, 64), (4096, 4096, 64, 1), 0), buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.init as init
class Net(nn.Module):
def __init__(self, upscale_factor):
super(Net, self).__init__()
self.upscale_factor = int(upscale_factor)
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(1, 64, kernel_size=5, padding=2)
self.conv2_1 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_3 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(64, 32, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(32, int(self.upscale_factor ** 2),
kernel_size=3, padding=1)
self.pixel_shuffle = nn.PixelShuffle(self.upscale_factor)
self.conv5 = nn.Conv2d(1, 1, kernel_size=1, padding=0)
self.weight_init()
def forward(self, x):
out = self.relu(self.conv1(x))
out = self.relu(self.conv2_1(out))
out = self.relu(self.conv2_2(out))
out = self.relu(self.conv2_3(out))
out = self.relu(self.conv3(out))
out = self.relu(self.conv4(out))
out = self.pixel_shuffle(out)
out = self.conv5(out)
return out
def weight_init(self):
init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2_1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2_2.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2_3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv4.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv5.weight)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {'upscale_factor': 1.0}]
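# --- Editorial sketch (not part of the original file) -------------------------
# Why conv4 emits upscale_factor**2 channels: nn.PixelShuffle rearranges a
# [N, r*r, H, W] tensor into [N, 1, r*H, r*W]. Sketch with an assumed r=2
# (get_init_inputs() above uses r=1, where the shuffle is a no-op).
if __name__ == '__main__':
    net = Net(upscale_factor=2)
    y = net(torch.rand(1, 1, 16, 16))
    print(y.shape)  # torch.Size([1, 1, 32, 32])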
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, None)
tl.store(out_ptr0 + x0, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
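# Plain bias add for conv5, which has no activation in the source model.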
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (1, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (1,), (1,))
assert_size_stride(primals_14, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_15, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(1048576)](buf5, primals_7,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_0[grid(1048576)](buf7, primals_9,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_1[grid(524288)](buf9, primals_11,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf11 = reinterpret_tensor(buf10, (4, 1, 64, 64), (4096, 1, 64, 1), 0)
del buf10
buf14 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_2[grid(16384)](
buf11, primals_13, buf14, 16384, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_13
buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 1,
64, 64), (4096, 0, 64, 1), 0), primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_3[grid(16384)](buf13, primals_15,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf1, buf3, buf5, buf7, buf9,
reinterpret_tensor(buf11, (4, 1, 64, 64), (4096, 4096, 64, 1), 0),
buf14)
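# buf13 is the module output; the remaining entries are the weights and
# intermediate activations (plus the buf14 ReLU mask) that Inductor saves
# for the backward pass.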
class NetNew(nn.Module):
def __init__(self, upscale_factor):
super(NetNew, self).__init__()
self.upscale_factor = int(upscale_factor)
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(1, 64, kernel_size=5, padding=2)
self.conv2_1 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_3 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(64, 32, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(32, int(self.upscale_factor ** 2),
kernel_size=3, padding=1)
self.pixel_shuffle = nn.PixelShuffle(self.upscale_factor)
self.conv5 = nn.Conv2d(1, 1, kernel_size=1, padding=0)
self.weight_init()
def weight_init(self):
init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2_1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2_2.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2_3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv4.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv5.weight)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2_1.weight
primals_5 = self.conv2_1.bias
primals_6 = self.conv2_2.weight
primals_7 = self.conv2_2.bias
primals_8 = self.conv2_3.weight
primals_9 = self.conv2_3.bias
primals_10 = self.conv3.weight
primals_11 = self.conv3.bias
primals_12 = self.conv4.weight
primals_13 = self.conv4.bias
primals_14 = self.conv5.weight
primals_15 = self.conv5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
| PiSchool/esa-superresolution-forecasting | Net | false | 17,905 | [
"MIT"
] | 4 | 3c01770dd64749d6b6c40e1068a96a3307c8c035 | https://github.com/PiSchool/esa-superresolution-forecasting/tree/3c01770dd64749d6b6c40e1068a96a3307c8c035 |
OneSideInterModalityUpdate | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/6l/c6l4fprbhfxpynsntqswtoln4ycxaz7nusbahlnzatzxoczbazu2.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
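# The kernel above fuses the per-feature linear bias (x0 = xindex % 4)
# with ReLU and records the (out <= 0) mask for threshold_backward;
# xnumel = 64 is the flattened 4x4x4 projection of tgt.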
# kernel path: runs/run_shard_2/inductor_cache/2f/c2fj5dhsathjuxxup3fozy7htkpitgs5wbgrcs5a6rztuaaewsio.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/jo/cjoggvzfjdxjpadnpp5ofakptfu5mz3lrferdnbpwoiylgw6ynjl.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul => bmm
# Graph fragment:
# %bmm : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand, %expand_1), kwargs = {})
triton_poi_fused_bmm_2 = async_compile.triton('triton_poi_fused_bmm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/h5/ch5seitw6dolhcnbibjuespt37el54g7eq64x7df7iw5ujamf6qg.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul => bmm
# Graph fragment:
# %bmm : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand, %expand_1), kwargs = {})
triton_poi_fused_bmm_3 = async_compile.triton('triton_poi_fused_bmm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (8*x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/re/cre7w75rug2us4dh57l67bx5rwo3jz6peysf4en72be5wnd6jfnk.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp
# Graph fragment:
# %mul_tensor_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_3, [2], True), kwargs = {})
# %sub_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_3, %amax_default_3), kwargs = {})
# %div_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_3, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_3,), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
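# Numerically stable softmax numerator: each logit row is reduced by its
# max before exp. The mul/div by 1.0 appears to be the folded attention
# scale, since (output_size // num_head) ** 0.5 == 1 for the default
# 4-dim, 4-head config; the division by the row sum is deferred to the
# cat kernels below.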
# kernel path: runs/run_shard_2/inductor_cache/ws/cwsjjvqyab6dnbgvlapznnpbpik6q6whfdh34725bhmfoo7lmth5.py
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_1 => bmm_1
# Graph fragment:
# %bmm_1 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_2, %expand_3), kwargs = {})
triton_poi_fused_bmm_5 = async_compile.triton('triton_poi_fused_bmm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/gl/cglxxckwehgwttrtfhbu4evqtgpkwhunxp6rm4er36k4wysvzciv.py
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_1 => bmm_1
# Graph fragment:
# %bmm_1 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_2, %expand_3), kwargs = {})
triton_poi_fused_bmm_6 = async_compile.triton('triton_poi_fused_bmm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (8*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/m6/cm67jfqadwz3u4py6sh52zhq2ila5k5rejwr67gfrglpgtp7e5t2.py
# Topologically Sorted Source Nodes: [tgt_update_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# tgt_update_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sum_2, %sum_4], 2), kwargs = {})
triton_poi_fused_cat_7 = async_compile.triton('triton_poi_fused_cat_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = (xindex // 2)
x2 = (xindex // 8)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4*x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp5 / tmp11
tmp13 = tl.load(in_ptr1 + (4 + (32*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp6 / tmp11
tmp16 = tl.load(in_ptr1 + (12 + (32*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp8 / tmp11
tmp20 = tl.load(in_ptr1 + (20 + (32*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp10 / tmp11
tmp24 = tl.load(in_ptr1 + (28 + (32*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tmp30 = tl.full([1], 2, tl.int64)
tmp31 = tmp0 < tmp30
tmp32 = tl.load(in_ptr2 + (4*x3), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (1 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.load(in_ptr2 + (2 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr2 + (3 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp32 / tmp38
tmp40 = tl.load(in_ptr1 + (5 + (32*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp33 / tmp38
tmp43 = tl.load(in_ptr1 + (13 + (32*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp35 / tmp38
tmp47 = tl.load(in_ptr1 + (21 + (32*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp37 / tmp38
tmp51 = tl.load(in_ptr1 + (29 + (32*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp29, tmp53, tmp54)
tmp56 = tl.where(tmp4, tmp28, tmp55)
tl.store(out_ptr0 + (x5), tmp56, xmask)
''', device_str='cuda')
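# This cat kernel finishes the work for heads 0 and 1: each exp row from
# the softmax kernels is normalized by its sum (tmp5 / tmp11 etc.), the
# normalized weights multiply that head's value slice of buf3 (channel
# offsets 4 and 5 within each 8-wide row), and the two 1-dim weighted
# sums are written side by side into the (4, 4, 2) output.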
# kernel path: runs/run_shard_2/inductor_cache/mw/cmwa6qncsm3erytuawise3ilobvd7v6tjha2yp5fgvgdtwoldccf.py
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_2 => bmm_2
# Graph fragment:
# %bmm_2 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_4, %expand_5), kwargs = {})
triton_poi_fused_bmm_8 = async_compile.triton('triton_poi_fused_bmm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/kf/ckfvrmau3qvtmpxht5u6hqk674ml4xalodlhnlekfyuf3ogcp2ew.py
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_2 => bmm_2
# Graph fragment:
# %bmm_2 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_4, %expand_5), kwargs = {})
triton_poi_fused_bmm_9 = async_compile.triton('triton_poi_fused_bmm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (8*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/hk/chkbi72btw2o4crqgqumkqeu7mlliqptlaicytt4by4ef66pcjkz.py
# Topologically Sorted Source Nodes: [tgt_update_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# tgt_update_2 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %sum_6], 2), kwargs = {})
triton_poi_fused_cat_10 = async_compile.triton('triton_poi_fused_cat_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = (xindex // 3)
x2 = (xindex // 12)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((2*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (4*x3), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (6 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (14 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (22 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (30 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + (x5), tmp33, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/7x/c7xbq5fhfi2dzouz4yrwinyaih7fon4pmkcvadymisvb4bb3dxbh.py
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_3 => bmm_3
# Graph fragment:
# %bmm_3 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_6, %expand_7), kwargs = {})
triton_poi_fused_bmm_11 = async_compile.triton('triton_poi_fused_bmm_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/zj/czjynrijjloto7mlsykx3q2tuhgf54flqdmbhytjo2bmohpz3kpu.py
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_3 => bmm_3
# Graph fragment:
# %bmm_3 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_6, %expand_7), kwargs = {})
triton_poi_fused_bmm_12 = async_compile.triton('triton_poi_fused_bmm_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_12(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + (8*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/3p/c3pvaukrtq7usmings2evfjempleqpomp4ecul7jyjfzpw7yajqy.py
# Topologically Sorted Source Nodes: [tgt_update_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# tgt_update_3 => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_1, %sum_8], 2), kwargs = {})
triton_poi_fused_cat_13 = async_compile.triton('triton_poi_fused_cat_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 4)
x2 = (xindex // 16)
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((3*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (4*x3), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (7 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (15 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (23 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (31 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + (x0 + (8*x3)), tmp33, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/p2/cp2vynlbthpd2wgcfg75zvxlrhq2lg7sglmkdkngc7csmbcslyb3.py
# Topologically Sorted Source Nodes: [cat_tgt], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_tgt => cat_3
# Graph fragment:
# %cat_3 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %cat_2], 2), kwargs = {})
triton_poi_fused_cat_14 = async_compile.triton('triton_poi_fused_cat_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (8*x1)), tmp0, xmask)
''', device_str='cuda')
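# Copies tgt into the first four channels of each 8-wide row of the
# concat buffer (store at x0 + 8*x1); together with the attention update
# already written at offset 4 by the kernel above, this realizes
# torch.cat((tgt, tgt_update), dim=2) without a separate copy pass.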
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (8, 4), (4, 1))
assert_size_stride(primals_4, (8, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 8), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf2, primals_6, buf28, 64, grid=grid(64), stream=stream0)
del primals_6
buf3 = reinterpret_tensor(buf0, (4, 4, 8), (32, 8, 1), 0); del buf0 # reuse
buf29 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_4, buf29, 128, grid=grid(128), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
triton_poi_fused_bmm_2.run(buf2, buf4, 16, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf3, buf5, 16, grid=grid(16), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(buf4, buf5, out=buf6)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf6, buf7, 64, grid=grid(64), stream=stream0)
buf8 = reinterpret_tensor(buf5, (4, 4, 1), (4, 1, 16), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
triton_poi_fused_bmm_5.run(buf2, buf8, 16, grid=grid(16), stream=stream0)
buf9 = reinterpret_tensor(buf4, (4, 1, 4), (4, 16, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf3, buf9, 16, grid=grid(16), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf8, buf9, out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf10, buf11, 64, grid=grid(64), stream=stream0)
buf12 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt_update_1], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf7, buf3, buf11, buf12, 32, grid=grid(32), stream=stream0)
buf13 = reinterpret_tensor(buf9, (4, 4, 1), (4, 1, 16), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
triton_poi_fused_bmm_8.run(buf2, buf13, 16, grid=grid(16), stream=stream0)
buf14 = reinterpret_tensor(buf8, (4, 1, 4), (4, 16, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf3, buf14, 16, grid=grid(16), stream=stream0)
buf15 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
extern_kernels.bmm(buf13, buf14, out=buf15)
buf16 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf15, buf16, 64, grid=grid(64), stream=stream0)
buf17 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt_update_2], Original ATen: [aten.cat]
triton_poi_fused_cat_10.run(buf12, buf16, buf3, buf17, 48, grid=grid(48), stream=stream0)
del buf12
buf18 = reinterpret_tensor(buf14, (4, 4, 1), (4, 1, 16), 0); del buf14 # reuse
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
triton_poi_fused_bmm_11.run(buf2, buf18, 16, grid=grid(16), stream=stream0)
buf19 = reinterpret_tensor(buf13, (4, 1, 4), (4, 16, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf3, buf19, 16, grid=grid(16), stream=stream0)
buf20 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
extern_kernels.bmm(buf18, buf19, out=buf20)
del buf18
del buf19
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf20, buf21, 64, grid=grid(64), stream=stream0)
buf24 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf22 = reinterpret_tensor(buf24, (4, 4, 4), (32, 8, 1), 4) # alias
# Topologically Sorted Source Nodes: [tgt_update_3], Original ATen: [aten.cat]
triton_poi_fused_cat_13.run(buf17, buf21, buf3, buf22, 64, grid=grid(64), stream=stream0)
del buf17
buf23 = reinterpret_tensor(buf24, (4, 4, 4), (32, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [cat_tgt], Original ATen: [aten.cat]
triton_poi_fused_cat_14.run(primals_2, buf23, 64, grid=grid(64), stream=stream0)
buf25 = reinterpret_tensor(buf21, (16, 4), (4, 1), 0); del buf21 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf24, (16, 8), (8, 1), 0), reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), out=buf25)
buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0); del buf25 # reuse
buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf26, primals_8, buf27, 64, grid=grid(64), stream=stream0)
del primals_8
return (buf26, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf6, reinterpret_tensor(buf3, (4, 1, 4, 1), (32, 32, 8, 1), 4), buf10, reinterpret_tensor(buf3, (4, 1, 4, 1), (32, 32, 8, 1), 5), buf15, reinterpret_tensor(buf3, (4, 1, 4, 1), (32, 32, 8, 1), 6), buf20, reinterpret_tensor(buf3, (4, 1, 4, 1), (32, 32, 8, 1), 7), reinterpret_tensor(buf24, (16, 8), (8, 1), 0), buf27, primals_7, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf3, (4, 4, 1), (32, 8, 1), 3), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf3, (4, 4, 1), (32, 8, 1), 2), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf3, (4, 4, 1), (32, 8, 1), 1), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (4, 4, 1), (32, 8, 1), 0), buf28, buf29, )
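# buf26 is tgt_updated; the trailing tensors (attention logits, value
# slices of buf3, and the ReLU masks buf27, buf28, buf29) are stashed
# for the backward pass.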
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
        if self.activate == 'relu':
            self.ac_fn = nn.ReLU()
        elif self.activate == 'sigmoid':
            self.ac_fn = nn.Sigmoid()
        elif self.activate == 'tanh':
            self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class OneSideInterModalityUpdate(nn.Module):
"""
one-side Inter-Modality Attention Flow
    according to the paper, instead of computing V->Q and Q->V in parallel, we first do V->Q and then Q->V
"""
def __init__(self, src_size, tgt_size, output_size, num_head, drop=0.0):
super(OneSideInterModalityUpdate, self).__init__()
self.src_size = src_size
self.tgt_size = tgt_size
self.output_size = output_size
self.num_head = num_head
self.src_lin = FCNet(src_size, output_size * 2, drop=drop, activate
='relu')
self.tgt_lin = FCNet(tgt_size, output_size, drop=drop, activate='relu')
self.tgt_output = FCNet(output_size + tgt_size, output_size, drop=
drop, activate='relu')
def forward(self, src, tgt):
"""
:param src: eeg feature [batch, regions, feature_size]
:param tgt: eye feature [batch, regions, feature_size]
:return:
"""
        _batch_size, _num_src = src.shape[0], src.shape[1]
        _num_tgt = tgt.shape[1]
        src_tran = self.src_lin(src)
        tgt_tran = self.tgt_lin(tgt)
        src_key, src_val = torch.split(src_tran, src_tran.size(2) // 2, dim=2)
        tgt_query = tgt_tran
        head_size = self.output_size // self.num_head
        src_key_set = torch.split(src_key, src_key.size(2) // self.num_head, dim=2)
        src_val_set = torch.split(src_val, src_val.size(2) // self.num_head, dim=2)
        tgt_query_set = torch.split(tgt_query, tgt_query.size(2) // self.num_head, dim=2)
        for i in range(self.num_head):
            src_key_slice = src_key_set[i]
            tgt_query_slice = tgt_query_set[i]
            src_val_slice = src_val_set[i]
            src2tgt = tgt_query_slice @ src_key_slice.transpose(1, 2) / head_size ** 0.5
            interMAF_src2tgt = F.softmax(src2tgt, dim=2).unsqueeze(3)
            head_update = (interMAF_src2tgt * src_val_slice.unsqueeze(1)).sum(2)
            tgt_update = head_update if i == 0 else torch.cat((tgt_update, head_update), dim=2)
cat_tgt = torch.cat((tgt, tgt_update), dim=2)
tgt_updated = self.tgt_output(cat_tgt)
return tgt_updated
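# Minimal usage sketch (shapes follow get_inputs/get_init_inputs below):
#   model = OneSideInterModalityUpdate(src_size=4, tgt_size=4, output_size=4, num_head=4)
#   out = model(torch.rand(4, 4, 4), torch.rand(4, 4, 4))  # -> [batch, regions, output_size]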
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'src_size': 4, 'tgt_size': 4, 'output_size': 4, 'num_head': 4}
]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
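# Fused bias-add + ReLU that also emits the (activation <= 0) mask consumed by
# autograd's threshold_backward in the backward graph.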
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
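# The fused_bmm_* kernels below each gather one per-head column (stride 4 from
# the 4-wide query tensor, stride 8 from the 8-wide key/value tensor) into a
# contiguous buffer so the extern bmm can consume it.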
@triton.jit
def triton_poi_fused_bmm_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 8 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
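# Softmax numerator only: exp(x - rowmax). The division by the row sum is fused
# into the cat kernels below (e.g. tmp12 = tmp5 / tmp11 in triton_poi_fused_cat_7).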
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 8 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = xindex // 2
x2 = xindex // 8
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x3, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp5 / tmp11
tmp13 = tl.load(in_ptr1 + (4 + 32 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp6 / tmp11
tmp16 = tl.load(in_ptr1 + (12 + 32 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp8 / tmp11
tmp20 = tl.load(in_ptr1 + (20 + 32 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp10 / tmp11
tmp24 = tl.load(in_ptr1 + (28 + 32 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp32 = tl.load(in_ptr2 + 4 * x3, tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (1 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.load(in_ptr2 + (2 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr2 + (3 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp32 / tmp38
tmp40 = tl.load(in_ptr1 + (5 + 32 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp33 / tmp38
tmp43 = tl.load(in_ptr1 + (13 + 32 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp35 / tmp38
tmp47 = tl.load(in_ptr1 + (21 + 32 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp37 / tmp38
tmp51 = tl.load(in_ptr1 + (29 + 32 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp29, tmp53, tmp54)
tmp56 = tl.where(tmp4, tmp28, tmp55)
tl.store(out_ptr0 + x5, tmp56, xmask)
@triton.jit
def triton_poi_fused_bmm_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 8 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = xindex // 3
x2 = xindex // 12
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 3, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x3, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (6 + 32 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (14 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (22 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (30 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + x5, tmp33, xmask)
@triton.jit
def triton_poi_fused_bmm_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 8 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
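# cat_13 and cat_14 write straight into the two aliased halves of buf24:
# tgt_update lands in columns 4..7 (offset 4), the original tgt in columns 0..3.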
@triton.jit
def triton_poi_fused_cat_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x2 = xindex // 16
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (3 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x3, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (7 + 32 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (15 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (23 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (31 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + (x0 + 8 * x3), tmp33, xmask)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (8, 4), (4, 1))
assert_size_stride(primals_4, (8,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 8), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf2,
primals_6, buf28, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_6
buf3 = reinterpret_tensor(buf0, (4, 4, 8), (32, 8, 1), 0)
del buf0
buf29 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(128)](buf3,
primals_4, buf29, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
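        # buf2: ReLU'd query (tgt_lin output); buf3: ReLU'd key||value (src_lin
        # output, width 8); buf28/buf29 are the ReLU masks saved for backward.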
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_bmm_2[grid(16)](buf2, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf3, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf4, buf5, out=buf6)
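        # Head-0 attention logits: each head slice is a single column here, so
        # this is a batched outer product yielding 4x4 logits per batch element.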
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf6, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf5, (4, 4, 1), (4, 1, 16), 0)
del buf5
triton_poi_fused_bmm_5[grid(16)](buf2, buf8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf4, (4, 1, 4), (4, 16, 1), 0)
del buf4
triton_poi_fused_bmm_6[grid(16)](buf3, buf9, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf8, buf9, out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf10, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_7[grid(32)](buf7, buf3, buf11, buf12, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf9, (4, 4, 1), (4, 1, 16), 0)
del buf9
triton_poi_fused_bmm_8[grid(16)](buf2, buf13, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf8, (4, 1, 4), (4, 16, 1), 0)
del buf8
triton_poi_fused_bmm_9[grid(16)](buf3, buf14, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf15 = buf7
del buf7
extern_kernels.bmm(buf13, buf14, out=buf15)
buf16 = buf11
del buf11
triton_poi_fused__softmax_4[grid(64)](buf15, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
triton_poi_fused_cat_10[grid(48)](buf12, buf16, buf3, buf17, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del buf12
buf18 = reinterpret_tensor(buf14, (4, 4, 1), (4, 1, 16), 0)
del buf14
triton_poi_fused_bmm_11[grid(16)](buf2, buf18, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf19 = reinterpret_tensor(buf13, (4, 1, 4), (4, 16, 1), 0)
del buf13
triton_poi_fused_bmm_12[grid(16)](buf3, buf19, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf20 = buf16
del buf16
extern_kernels.bmm(buf18, buf19, out=buf20)
del buf18
del buf19
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf20, buf21, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf24 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf22 = reinterpret_tensor(buf24, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_13[grid(64)](buf17, buf21, buf3, buf22, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf17
buf23 = reinterpret_tensor(buf24, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_cat_14[grid(64)](primals_2, buf23, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf25 = reinterpret_tensor(buf21, (16, 4), (4, 1), 0)
del buf21
extern_kernels.mm(reinterpret_tensor(buf24, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), out=buf25)
buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0)
del buf25
buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf26,
primals_8, buf27, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
return buf26, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), buf6, reinterpret_tensor(buf3, (4, 1, 4, 1), (32, 32, 8, 1), 4
), buf10, reinterpret_tensor(buf3, (4, 1, 4, 1), (32, 32, 8, 1), 5
), buf15, reinterpret_tensor(buf3, (4, 1, 4, 1), (32, 32, 8, 1), 6
), buf20, reinterpret_tensor(buf3, (4, 1, 4, 1), (32, 32, 8, 1), 7
), reinterpret_tensor(buf24, (16, 8), (8, 1), 0
), buf27, primals_7, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf3, (4, 4, 1), (32, 8, 1), 3
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2
), reinterpret_tensor(buf3, (4, 4, 1), (32, 8, 1), 2
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1
), reinterpret_tensor(buf3, (4, 4, 1), (32, 8, 1), 1
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (4, 4, 1), (32, 8, 1), 0), buf28, buf29
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
        if self.activate == 'relu':
            self.ac_fn = nn.ReLU()
        elif self.activate == 'sigmoid':
            self.ac_fn = nn.Sigmoid()
        elif self.activate == 'tanh':
            self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class OneSideInterModalityUpdateNew(nn.Module):
"""
one-side Inter-Modality Attention Flow
    according to the paper, instead of computing V->Q and Q->V in parallel, we first do V->Q and then Q->V
"""
def __init__(self, src_size, tgt_size, output_size, num_head, drop=0.0):
super(OneSideInterModalityUpdateNew, self).__init__()
self.src_size = src_size
self.tgt_size = tgt_size
self.output_size = output_size
self.num_head = num_head
self.src_lin = FCNet(src_size, output_size * 2, drop=drop, activate
='relu')
self.tgt_lin = FCNet(tgt_size, output_size, drop=drop, activate='relu')
self.tgt_output = FCNet(output_size + tgt_size, output_size, drop=
drop, activate='relu')
def forward(self, input_0, input_1):
primals_3 = self.src_lin.lin.weight
primals_4 = self.src_lin.lin.bias
primals_5 = self.tgt_lin.lin.weight
primals_6 = self.tgt_lin.lin.bias
primals_7 = self.tgt_output.lin.weight
primals_8 = self.tgt_output.lin.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| Ruiver/CTCNet | OneSideInterModalityUpdate | false | 17,906 | [
"Apache-2.0"
] | 6 | 539e55ec9fed06028379d35dfd5cd4074755ffd8 | https://github.com/Ruiver/CTCNet/tree/539e55ec9fed06028379d35dfd5cd4074755ffd8 |
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/cs/ccsvnkti3aqfasbx7dbo7nr35tpiu5icq3cchvycu6z3tse4u6dq.py
# Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# intersection => mul
# sum_1 => sum_1
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
tl.store(out_ptr2 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ux/cuxwy52ps3m3yawosd25cfvlsyqaihbtqd6gzcuzoaedpcmtsfs7.py
# Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, loss, sum_4, truediv_1, loss_1], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# loss => div
# loss_1 => sub
# mul_1 => mul_1
# sum_4 => sum_4
# truediv_1 => div_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp6 = tl.load(in_ptr2 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp1 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp15, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf2, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, loss, sum_4, truediv_1, loss_1], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub]
triton_per_fused_add_div_mul_rsub_sum_1.run(buf4, buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, input, target):
N = target.size(0)
smooth = 1
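        # Soft Dice per sample: 2*(sum(x*y) + smooth) / (sum(x) + sum(y) + smooth)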
input_flat = input.view(N, -1)
target_flat = target.view(N, -1)
intersection = input_flat * target_flat
loss = 2 * (intersection.sum(1) + smooth) / (input_flat.sum(1) +
target_flat.sum(1) + smooth)
loss = 1 - loss.sum() / N
return loss
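# Note: because smooth sits inside the 2*(...) numerator, a perfect prediction
# gives a per-sample ratio slightly above 1 and hence a slightly negative loss.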
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
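# Per-sample reduction over the 64 flattened elements: emits sum(x*y), sum(x)
# and sum(y) for each of the 4 samples.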
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
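# Combines the three per-sample sums into the scalar loss:
# 1 - 0.25 * sum_i 2*(I_i + 1) / (X_i + Y_i + 1).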
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp1 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
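        # buf0: per-sample sum(x*y); buf1/buf2: per-sample sums of each input.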
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
class DiceLossNew(nn.Module):
def __init__(self):
super(DiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| SeffyVon/ECG_MICResNet | DiceLoss | false | 17,907 | [
"BSD-3-Clause"
] | 5 | 8c6a319b5822ddfb130738eb1d9cdc3c21b24209 | https://github.com/SeffyVon/ECG_MICResNet/tree/8c6a319b5822ddfb130738eb1d9cdc3c21b24209 |
deepmind | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/r6/cr6bsqgh7rrlhldrhxtjhx4pvwivdp54lmpez74bccoinafqkzbh.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 156800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 1225) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6c/c6c6joipdf45jy2p3veqqwhctzigrpzsopgv7m6zwqilprwqdm3u.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/fb/cfbsox5zmkvdromyveuopxbmsqqjs77xdsawstverqg3ellc73p2.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25088
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 196) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 8, 8), (256, 64, 8, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 144, 144), (82944, 20736, 144, 1))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 35, 35), (39200, 1225, 35, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 156800, grid=grid(156800), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 16, 16), (16384, 256, 16, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 65536, grid=grid(65536), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 14, 14), (6272, 196, 14, 1))
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_2.run(buf5, primals_7, buf6, 25088, grid=grid(25088), stream=stream0)
del primals_7
return (reinterpret_tensor(buf5, (16, 1568), (1568, 1), 0), primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4, 8, 8), (256, 64, 8, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 144, 144), (82944, 20736, 144, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class deepmind(nn.Module):
def __init__(self):
super(deepmind, self).__init__()
self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
nn.init.orthogonal_(self.conv1.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.conv2.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.conv3.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.constant_(self.conv1.bias.data, 0)
nn.init.constant_(self.conv2.bias.data, 0)
nn.init.constant_(self.conv3.bias.data, 0)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
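        # With the canonical 84x84 Atari input the conv stack ends at 32x7x7;
        # the 144x144 test input instead yields 32x14x14, so the view below
        # reshapes across the batch (4x32x14x14 flattens to 16 rows of 1568).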
x = x.view(-1, 32 * 7 * 7)
return x
def get_inputs():
return [torch.rand([4, 4, 144, 144])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
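# Each kernel below fuses the conv bias-add with ReLU, applied in place on the
# extern convolution's output; the last one also saves the ReLU backward mask.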
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 156800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 1225 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25088
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 196 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 8, 8), (256, 64, 8, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 144, 144), (82944, 20736, 144, 1))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
4), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 35, 35), (39200, 1225, 35, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(156800)](buf1, primals_2,
156800, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 16, 16), (16384, 256, 16, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(65536)](buf3, primals_5,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 14, 14), (6272, 196, 14, 1))
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1),
torch.bool)
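        # buf6 records where conv3's ReLU clamped to zero (backward mask).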
triton_poi_fused_convolution_relu_threshold_backward_2[grid(25088)](
buf5, primals_7, buf6, 25088, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_7
return reinterpret_tensor(buf5, (16, 1568), (1568, 1), 0
), primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf6
class deepmindNew(nn.Module):
def __init__(self):
super(deepmindNew, self).__init__()
self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
nn.init.orthogonal_(self.conv1.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.conv2.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.conv3.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.constant_(self.conv1.bias.data, 0)
nn.init.constant_(self.conv2.bias.data, 0)
nn.init.constant_(self.conv3.bias.data, 0)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| Rowing0914/TF2_RL | deepmind | false | 17,908 | [
"MIT"
] | 8 | c1b7f9b376cbecf01deb17f76f8e761035ed336a | https://github.com/Rowing0914/TF2_RL/tree/c1b7f9b376cbecf01deb17f76f8e761035ed336a |
Bias | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/4b/c4bpho76vlcic5gjehgofej5wsbmocpg7mqjlqouz6mauxjghx2z.py
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# z_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %primals_3), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_2, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), out=buf0)
del primals_1
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Bias(nn.Module):
def __init__(self):
super(Bias, self).__init__()
self.bias = nn.Parameter(torch.zeros(1))
def forward(self, feat_img, feat_sound):
B, C, H, W = feat_sound.size()
feat_img = feat_img.view(B, 1, C)
z = torch.bmm(feat_img, feat_sound.view(B, C, H * W)).view(B, 1, H, W)
z = z + self.bias
return z
def forward_nosum(self, feat_img, feat_sound):
B, C, _H, _W = feat_sound.size()
z = feat_img.view(B, C, 1, 1) * feat_sound
z = z + self.bias
return z
def forward_pixelwise(self, feats_img, feat_sound):
B, C, HI, WI = feats_img.size()
B, C, HS, WS = feat_sound.size()
feats_img = feats_img.view(B, C, HI * WI)
feats_img = feats_img.transpose(1, 2)
feat_sound = feat_sound.view(B, C, HS * WS)
z = torch.bmm(feats_img, feat_sound).view(B, HI, WI, HS, WS)
z = z + self.bias
return z
def get_inputs():
return [torch.rand([4, 1, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
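# Minimal usage sketch (an illustration, not part of the original repo; the
# shapes mirror get_inputs above). The module scores a per-sample C-dim image
# feature against every spatial position of a (C, H, W) sound feature map and
# adds a single learned scalar bias.
def example_usage():
    m = Bias()
    z = m(torch.rand(4, 1, 4), torch.rand(4, 4, 4, 4))
    assert z.shape == (4, 1, 4, 4)
    return z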
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
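# Elementwise kernel: adds the single learned bias (a 1-element tensor,
# broadcast via tl.broadcast_to) to all 64 elements of the bmm output,
# writing the result back in place.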
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
extern_kernels.bmm(primals_2, reinterpret_tensor(primals_1, (4, 4,
16), (64, 16, 1), 0), out=buf0)
del primals_1
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
return buf1,
class BiasNew(nn.Module):
def __init__(self):
super(BiasNew, self).__init__()
self.bias = nn.Parameter(torch.zeros(1))
def forward_nosum(self, feat_img, feat_sound):
B, C, _H, _W = feat_sound.size()
z = feat_img.view(B, C, 1, 1) * feat_sound
z = z + self.bias
return z
def forward_pixelwise(self, feats_img, feat_sound):
B, C, HI, WI = feats_img.size()
B, C, HS, WS = feat_sound.size()
feats_img = feats_img.view(B, C, HI * WI)
feats_img = feats_img.transpose(1, 2)
feat_sound = feat_sound.view(B, C, HS * WS)
z = torch.bmm(feats_img, feat_sound).view(B, HI, WI, HS, WS)
z = z + self.bias
return z
def forward(self, input_0, input_1):
primals_3 = self.bias
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| SheldonTsui/Minus-Plus-Network | Bias | false | 17,909 | [
"Apache-2.0"
] | 5 | 7aa281b17f637a9f168aaf250039e560027a3817 | https://github.com/SheldonTsui/Minus-Plus-Network/tree/7aa281b17f637a9f168aaf250039e560027a3817 |
projection_model | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/cx/ccx4hsrcbj6tzud7v2w7q3mk4o6bnzcevmbfpb5tclmenter43ve.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# out => add, erf, mul, mul_1, mul_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
triton_poi_fused_gelu_0 = async_compile.triton('triton_poi_fused_gelu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (512, 2), (2, 1))
assert_size_stride(primals_5, (512, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.gelu]
stream0 = get_raw_stream(0)
triton_poi_fused_gelu_0.run(buf0, buf1, 128, grid=grid(128), stream=stream0)
buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2), (2, 1), 0), reinterpret_tensor(primals_4, (2, 512), (1, 2), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 2), (2, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((512, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class projection_model(torch.nn.Module):
def __init__(self, neo_hidden, clip_hidden=512):
super(projection_model, self).__init__()
self.fc1 = torch.nn.Linear(neo_hidden, neo_hidden // 2)
self.act = torch.nn.GELU()
self.fc2 = torch.nn.Linear(neo_hidden // 2, clip_hidden)
def forward(self, input_tensor):
out = self.act(self.fc1(input_tensor))
return self.fc2(out)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'neo_hidden': 4}]
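# Hypothetical smoke test (not part of the original repo; sizes follow
# get_init_inputs above): fc1 halves the hidden width, GELU is applied, then
# fc2 maps to the clip_hidden (512) output space.
def example_usage():
    proj = projection_model(neo_hidden=4)
    out = proj(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 4, 512)
    return out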
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
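# Exact (erf-based) GELU: computes 0.5 * x * (1 + erf(x / sqrt(2))) for each
# of the 128 activations; the constant 0.7071067811865476 is 1/sqrt(2).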
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (512, 2), (2, 1))
assert_size_stride(primals_5, (512,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(128)](buf0, buf1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2), (
2, 1), 0), reinterpret_tensor(primals_4, (2, 512), (1, 2), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 2), (2, 1), 0), primals_4
class projection_modelNew(torch.nn.Module):
def __init__(self, neo_hidden, clip_hidden=512):
super(projection_modelNew, self).__init__()
self.fc1 = torch.nn.Linear(neo_hidden, neo_hidden // 2)
self.act = torch.nn.GELU()
self.fc2 = torch.nn.Linear(neo_hidden // 2, clip_hidden)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ShivanshuPurohit/GPT-Neo-visual-grounding | projection_model | false | 17,910 | [
"Apache-2.0"
] | 4 | 9c938257a688ef5ae8bc1b87b61d943aa158e880 | https://github.com/ShivanshuPurohit/GPT-Neo-visual-grounding/tree/9c938257a688ef5ae8bc1b87b61d943aa158e880 |
DSCLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/r3/cr3kfvilwggdekmk22piaa6bfcvsr2uhz2mv675rostqexx2wdlz.py
# Topologically Sorted Source Nodes: [sub, mul_1, mul_2, sum_1, sub_1, mul_4, add_1, sum_2], Original ATen: [aten.rsub, aten.mul, aten.sum, aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# mul_1 => mul_1
# mul_2 => mul_2
# mul_4 => mul_4
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %view), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %view), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %view_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_1, [1]), kwargs = {})
triton_per_fused_add_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_rsub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_rsub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp4 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = tmp2 * tmp0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tmp3 + tmp4
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp9, xmask)
tl.store(out_ptr1 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/2f/c2flqqsayoan7o3jbjj27uipdozsolea2egcj3fuccebjzhmmnqv.py
# Topologically Sorted Source Nodes: [mul_3, numerator, denominator, truediv, loss, sum_3], Original ATen: [aten.mul, aten.add, aten.div, aten.rsub, aten.sum]
# Source node to ATen node mapping:
# denominator => add_2
# loss => sub_2
# mul_3 => mul_3
# numerator => add
# sum_3 => sum_3
# truediv => div
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_2,), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 + tmp3
tmp6 = tmp5 + tmp3
tmp7 = tmp4 / tmp6
tmp8 = tmp3 - tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [sub, mul_1, mul_2, sum_1, sub_1, mul_4, add_1, sum_2], Original ATen: [aten.rsub, aten.mul, aten.sum, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_mul_rsub_sum_0.run(arg1_1, arg0_1, buf0, buf1, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [mul_3, numerator, denominator, truediv, loss, sum_3], Original ATen: [aten.mul, aten.add, aten.div, aten.rsub, aten.sum]
triton_per_fused_add_div_mul_rsub_sum_1.run(buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DSCLoss(nn.Module):
def __init__(self):
super(DSCLoss, self).__init__()
def forward(self, input, target):
N = target.size(0)
smooth = 1
input_flat = input.view(N, -1)
target_flat = target.view(N, -1)
        numerator = 2 * ((1 - input_flat) * input_flat * target_flat).sum(1) + smooth
        denominator = ((1 - input_flat) * input_flat + target_flat).sum(1) + smooth
loss = 1 - numerator / denominator
return loss.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
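# Hypothetical smoke test (not part of the original repo). Per sample the
# loss is 1 - (2 * sum((1 - p) * p * t) + 1) / (sum((1 - p) * p + t) + 1),
# a self-adjusting Dice variant in which the (1 - p) * p factor attenuates
# the contribution of confidently classified pixels; the per-sample losses
# are then summed over the batch.
def example_usage():
    crit = DSCLoss()
    pred = torch.rand(4, 4, 4, 4)
    target = (torch.rand(4, 4, 4, 4) > 0.5).float()
    return crit(pred, target)  # scalar tensor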
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
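# Per-sample reduction: for each of the 4 batch rows, sums
# (1 - a) * a * b (numerator term, out_ptr0) and (1 - a) * a + b
# (denominator term, out_ptr1) over the 64 flattened elements, where a and b
# are the two flattened input tensors.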
@triton.jit
def triton_per_fused_add_mul_rsub_sum_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp4 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = tmp2 * tmp0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tmp3 + tmp4
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp9, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
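# Final reduction: turns each pair of partial sums into
# 1 - (2 * s0 + 1) / (s1 + 1) and sums the four per-sample losses into a
# single scalar.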
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 + tmp3
tmp6 = tmp5 + tmp3
tmp7 = tmp4 / tmp6
tmp8 = tmp3 - tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mul_rsub_sum_0[grid(4)](arg1_1, arg0_1, buf0,
buf1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf0, buf1, buf2,
1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf2,
class DSCLossNew(nn.Module):
def __init__(self):
super(DSCLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| SeffyVon/ECG_MICResNet | DSCLoss | false | 17,911 | [
"BSD-3-Clause"
] | 5 | 8c6a319b5822ddfb130738eb1d9cdc3c21b24209 | https://github.com/SeffyVon/ECG_MICResNet/tree/8c6a319b5822ddfb130738eb1d9cdc3c21b24209 |
TwoMLPHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/dh/cdh5647x2vt4hp3so4z4goddb2b2kr7d7iwhr66je27zydhmgndx.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/4h/c4h7jmnixx2mqllvzke5vnot2sslzejub5w5pmhtsm4z77plqrfv.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => relu_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
assert_size_stride(primals_3, (1024, ), (1, ))
assert_size_stride(primals_4, (1024, 1024), (1024, 1))
assert_size_stride(primals_5, (1024, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1024), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 4096, grid=grid(4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1024, 1024), (1, 1024), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf4, 4096, grid=grid(4096), stream=stream0)
del primals_5
return (buf3, primals_1, buf1, buf4, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1024, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1024, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class TwoMLPHead(nn.Module):
"""
    Standard head for FPN-based models.
    Arguments:
        in_channels (int): number of input channels
        out_channels (int): size of the intermediate and final
            representation (default: 1024)
"""
def __init__(self, in_channels, out_channels=1024):
super(TwoMLPHead, self).__init__()
self.fc6 = nn.Linear(in_channels, out_channels)
self.fc7 = nn.Linear(out_channels, out_channels)
def forward(self, x):
x = x.flatten(start_dim=1)
x = F.relu(self.fc6(x))
x = F.relu(self.fc7(x))
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
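# Hypothetical smoke test (not part of the original repo): the head flattens
# everything after the batch dim, then applies fc6 -> ReLU -> fc7 -> ReLU.
def example_usage():
    head = TwoMLPHead(in_channels=4)
    x = head(torch.rand(4, 4))
    assert x.shape == (4, 1024)
    return x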
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
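# Fused bias-add + ReLU for fc6: adds the per-feature bias (index modulo
# 1024, broadcast over the batch) and clamps at zero, in place.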
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
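# Same fused bias-add + ReLU for fc7, except this variant also stores the
# `<= 0` mask (out_ptr0) that threshold_backward uses to zero gradients
# where ReLU clamped the activation.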
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
assert_size_stride(primals_3, (1024,), (1,))
assert_size_stride(primals_4, (1024, 1024), (1024, 1))
assert_size_stride(primals_5, (1024,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1024
), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(4096)](buf1, primals_3, 4096, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1024, 1024),
(1, 1024), 0), out=buf2)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(4096)](buf3,
primals_5, buf4, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, buf1, buf4, primals_4
class TwoMLPHeadNew(nn.Module):
"""
    Standard head for FPN-based models.
    Arguments:
        in_channels (int): number of input channels
        out_channels (int): size of the intermediate and final
            representation (default: 1024)
"""
def __init__(self, in_channels, out_channels=1024):
super(TwoMLPHeadNew, self).__init__()
self.fc6 = nn.Linear(in_channels, out_channels)
self.fc7 = nn.Linear(out_channels, out_channels)
def forward(self, input_0):
primals_2 = self.fc6.weight
primals_3 = self.fc6.bias
primals_4 = self.fc7.weight
primals_5 = self.fc7.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Sense-GVT/BigPretrain | TwoMLPHead | false | 17,912 | [
"Apache-2.0"
] | 8 | d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e | https://github.com/Sense-GVT/BigPretrain/tree/d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e |
InterModalityUpdate | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/4c/c4cfsaei64karvaan2ahuk3tjn4lplxw5ccbwojc6bshj5jnaxq3.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/jj/cjjvp2jjivjmvhhdlq4y4iy7nbwx4nwixpmfbf4bnitm6lsl3sxe.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul => bmm
# Graph fragment:
# %bmm : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand, %expand_1), kwargs = {})
triton_poi_fused_bmm_1 = async_compile.triton('triton_poi_fused_bmm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4 + (12*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/35/c35iwrzbkjtxsy2ve7gc5mkeunmdhhjpma4yqmlmiskcy54y6k5b.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul => bmm
# Graph fragment:
# %bmm : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand, %expand_1), kwargs = {})
triton_poi_fused_bmm_2 = async_compile.triton('triton_poi_fused_bmm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (12*x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
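# The two small "bmm" kernels above are gather steps: each copies one strided
# element per row (offsets 4 and 0 within every stride-12 row) into a
# contiguous 16-element buffer. With the fused projection laid out as
# (q, k, v) along the width-12 axis, these appear to extract per-head key and
# query slices for the attention matmul.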
# kernel path: runs/run_shard_2/inductor_cache/wt/cwt4xyqzpxcrj6pr2focn4xhex5czflis7onjiib5h7pwm52izsw.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp
# Graph fragment:
# %mul_tensor_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default_7 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_7, [2], True), kwargs = {})
# %sub_tensor_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_7, %amax_default_7), kwargs = {})
# %div_tensor_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_7, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_7,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
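# Numerically stable softmax, first half: each length-4 row is shifted by its
# max before exponentiating (the * 1.0 factors are no-op scale terms); the
# division by the row sum is presumably handled by a subsequent fused kernel
# not shown in this excerpt.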
# kernel path: runs/run_shard_2/inductor_cache/pb/cpbmc74f6u5t54ddefzrxlncgtvwvfti4qzdvk44rv7s7xjn6pwq.py
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_2 => bmm_2
# Graph fragment:
# %bmm_2 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_4, %expand_5), kwargs = {})
triton_poi_fused_bmm_4 = async_compile.triton('triton_poi_fused_bmm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (5 + (12*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/cc/cccbmu7miyhdu5qw6erngyrdoz7mdpumfzjdkw54pzxxm7khosyw.py
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_2 => bmm_2
# Graph fragment:
# %bmm_2 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_4, %expand_5), kwargs = {})
triton_poi_fused_bmm_5 = async_compile.triton('triton_poi_fused_bmm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (12*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/rk/crkvamfn24ghwvlzarzrub3qwkwmvbhl2put5dffkokock7aadkh.py
# Topologically Sorted Source Nodes: [v_update_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# v_update_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sum_3, %sum_7], 2), kwargs = {})
triton_poi_fused_cat_6 = async_compile.triton('triton_poi_fused_cat_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = (xindex // 2)
x2 = (xindex // 8)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4*x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp5 / tmp11
tmp13 = tl.load(in_ptr1 + (8 + (48*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp6 / tmp11
tmp16 = tl.load(in_ptr1 + (20 + (48*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp8 / tmp11
tmp20 = tl.load(in_ptr1 + (32 + (48*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp10 / tmp11
tmp24 = tl.load(in_ptr1 + (44 + (48*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tmp30 = tl.full([1], 2, tl.int64)
tmp31 = tmp0 < tmp30
tmp32 = tl.load(in_ptr2 + (4*x3), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (1 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.load(in_ptr2 + (2 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr2 + (3 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp32 / tmp38
tmp40 = tl.load(in_ptr1 + (9 + (48*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp33 / tmp38
tmp43 = tl.load(in_ptr1 + (21 + (48*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp35 / tmp38
tmp47 = tl.load(in_ptr1 + (33 + (48*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp37 / tmp38
tmp51 = tl.load(in_ptr1 + (45 + (48*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp29, tmp53, tmp54)
tmp56 = tl.where(tmp4, tmp28, tmp55)
tl.store(out_ptr0 + (x5), tmp56, xmask)
''', device_str='cuda')
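# Note: triton_poi_fused_cat_6 fuses three steps per output element. It
# normalizes the softmax weights produced above (dividing each exp value by
# the row sum tmp11/tmp38), takes the attention-weighted sum over the four
# value positions (in_ptr1 offsets 8/20/32/44 and 9/21/33/45 are the head-0
# and head-1 value columns of the 48-wide projection), and writes the two
# head results into adjacent channels of the concatenated update tensor.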
# kernel path: runs/run_shard_2/inductor_cache/wn/cwnmjlnwxcjcg4zaprrovhggctwvwpoqrzhxudodmkdc2sq6odef.py
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_4 => bmm_4
# Graph fragment:
# %bmm_4 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_8, %expand_9), kwargs = {})
triton_poi_fused_bmm_7 = async_compile.triton('triton_poi_fused_bmm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (6 + (12*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/hs/chswlhfk7wejlon2vddyndmqmugxf76dbmtqpkowqsva3bkrdsgh.py
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_4 => bmm_4
# Graph fragment:
# %bmm_4 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_8, %expand_9), kwargs = {})
triton_poi_fused_bmm_8 = async_compile.triton('triton_poi_fused_bmm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (12*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/go/cgobgnftddxrmfsjzyhd4millwile73hetgfassvn2h5zjfumaek.py
# Topologically Sorted Source Nodes: [v_update_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# v_update_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %sum_11], 2), kwargs = {})
triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = (xindex // 3)
x2 = (xindex // 12)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((2*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (4*x3), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (10 + (48*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (22 + (48*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (34 + (48*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (46 + (48*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + (x5), tmp33, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/x3/cx3vwiswyxfwvumjovmut3ooi3qrin2pcbl3edbfzglibcxeeend.py
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_6 => bmm_6
# Graph fragment:
# %bmm_6 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_12, %expand_13), kwargs = {})
triton_poi_fused_bmm_10 = async_compile.triton('triton_poi_fused_bmm_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (7 + (12*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ud/cudgll3jbwsc37cmrt4ozwdwh4aw5cmnpfe3i6bignaukvqepv77.py
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_6 => bmm_6
# Graph fragment:
# %bmm_6 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_12, %expand_13), kwargs = {})
triton_poi_fused_bmm_11 = async_compile.triton('triton_poi_fused_bmm_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + (12*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/w2/cw2f6zhfimijh7g5sp3m7ziag6gckmx2euutpkregwyqqg2mtny5.py
# Topologically Sorted Source Nodes: [v_update_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# v_update_3 => cat_4
# Graph fragment:
# %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_2, %sum_15], 2), kwargs = {})
triton_poi_fused_cat_12 = async_compile.triton('triton_poi_fused_cat_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 4)
x2 = (xindex // 16)
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((3*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (4*x3), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (11 + (48*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (23 + (48*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (35 + (48*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (47 + (48*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + (x0 + (8*x3)), tmp33, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/bc/cbcho3wasw3gtqt3whgw3ktfxxvn34454dkxhrhl6fl6zvmdimiz.py
# Topologically Sorted Source Nodes: [cat_v], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_v => cat_6
# Graph fragment:
# %cat_6 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %cat_4], 2), kwargs = {})
triton_poi_fused_cat_13 = async_compile.triton('triton_poi_fused_cat_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_13(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (8*x1)), tmp0, xmask)
''', device_str='cuda')
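# Note: torch.cat((v, v_update), dim=2) is realized without a standalone copy
# kernel: cat_12 stores the last head's update at offset x0 + 8*x3, i.e. into
# the upper half of a (4, 4, 8) buffer, and cat_13 writes the original input
# features into the lower half of the same buffer through aliased views (see
# buf40/buf42 and buf41/buf44 in call() below).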
# kernel path: runs/run_shard_2/inductor_cache/4o/c4omrxkwmuarnxluvgfpqumbxzuzvmpy7w5cyu3thdhit6jccdr6.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_5 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_29,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_14 = async_compile.triton('triton_poi_fused_relu_threshold_backward_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
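# Note: the relu_threshold_backward kernels add the linear bias, apply ReLU
# in place, and additionally store the (output <= 0) mask as a bool tensor;
# the masks (buf50/buf51/buf52/buf53) are returned from call() so the
# backward pass can zero gradients where the ReLU was inactive.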
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12, ), (1, ))
assert_size_stride(primals_5, (12, 4), (4, 1))
assert_size_stride(primals_6, (12, ), (1, ))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 8), (8, 1))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 12), (1, 4), 0), out=buf1)
del primals_5
buf2 = reinterpret_tensor(buf0, (4, 4, 12), (48, 12, 1), 0); del buf0 # reuse
buf53 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf2, primals_4, buf53, 192, grid=grid(192), stream=stream0)
del primals_4
buf3 = reinterpret_tensor(buf1, (4, 4, 12), (48, 12, 1), 0); del buf1 # reuse
buf52 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_6, buf52, 192, grid=grid(192), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
triton_poi_fused_bmm_1.run(buf2, buf4, 16, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
triton_poi_fused_bmm_2.run(buf3, buf5, 16, grid=grid(16), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(buf4, buf5, out=buf6)
buf7 = reinterpret_tensor(buf5, (4, 4, 1), (4, 1, 16), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
triton_poi_fused_bmm_1.run(buf3, buf7, 16, grid=grid(16), stream=stream0)
buf8 = reinterpret_tensor(buf4, (4, 1, 4), (4, 16, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
triton_poi_fused_bmm_2.run(buf2, buf8, 16, grid=grid(16), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, buf8, out=buf9)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf10, 64, grid=grid(64), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf9, buf11, 64, grid=grid(64), stream=stream0)
buf12 = reinterpret_tensor(buf8, (4, 4, 1), (4, 1, 16), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
triton_poi_fused_bmm_4.run(buf2, buf12, 16, grid=grid(16), stream=stream0)
buf13 = reinterpret_tensor(buf7, (4, 1, 4), (4, 16, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
triton_poi_fused_bmm_5.run(buf3, buf13, 16, grid=grid(16), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
extern_kernels.bmm(buf12, buf13, out=buf14)
buf15 = reinterpret_tensor(buf13, (4, 4, 1), (4, 1, 16), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
triton_poi_fused_bmm_4.run(buf3, buf15, 16, grid=grid(16), stream=stream0)
buf16 = reinterpret_tensor(buf12, (4, 1, 4), (4, 16, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
triton_poi_fused_bmm_5.run(buf2, buf16, 16, grid=grid(16), stream=stream0)
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
extern_kernels.bmm(buf15, buf16, out=buf17)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf14, buf18, 64, grid=grid(64), stream=stream0)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf17, buf19, 64, grid=grid(64), stream=stream0)
buf20 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_update_1], Original ATen: [aten.cat]
triton_poi_fused_cat_6.run(buf10, buf3, buf18, buf20, 32, grid=grid(32), stream=stream0)
buf21 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_update_1], Original ATen: [aten.cat]
triton_poi_fused_cat_6.run(buf11, buf2, buf19, buf21, 32, grid=grid(32), stream=stream0)
buf22 = reinterpret_tensor(buf16, (4, 4, 1), (4, 1, 16), 0); del buf16 # reuse
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf2, buf22, 16, grid=grid(16), stream=stream0)
buf23 = reinterpret_tensor(buf15, (4, 1, 4), (4, 16, 1), 0); del buf15 # reuse
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
triton_poi_fused_bmm_8.run(buf3, buf23, 16, grid=grid(16), stream=stream0)
buf24 = buf19; del buf19 # reuse
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
extern_kernels.bmm(buf22, buf23, out=buf24)
buf25 = reinterpret_tensor(buf23, (4, 4, 1), (4, 1, 16), 0); del buf23 # reuse
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf3, buf25, 16, grid=grid(16), stream=stream0)
buf26 = reinterpret_tensor(buf22, (4, 1, 4), (4, 16, 1), 0); del buf22 # reuse
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
triton_poi_fused_bmm_8.run(buf2, buf26, 16, grid=grid(16), stream=stream0)
buf27 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
extern_kernels.bmm(buf25, buf26, out=buf27)
buf28 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [softmax_4], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf24, buf28, 64, grid=grid(64), stream=stream0)
buf29 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [softmax_5], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf27, buf29, 64, grid=grid(64), stream=stream0)
buf30 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_update_2], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf20, buf28, buf3, buf30, 48, grid=grid(48), stream=stream0)
del buf20
buf31 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_update_2], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf21, buf29, buf2, buf31, 48, grid=grid(48), stream=stream0)
del buf21
buf32 = reinterpret_tensor(buf26, (4, 4, 1), (4, 1, 16), 0); del buf26 # reuse
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
triton_poi_fused_bmm_10.run(buf2, buf32, 16, grid=grid(16), stream=stream0)
buf33 = reinterpret_tensor(buf25, (4, 1, 4), (4, 16, 1), 0); del buf25 # reuse
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
triton_poi_fused_bmm_11.run(buf3, buf33, 16, grid=grid(16), stream=stream0)
buf34 = buf29; del buf29 # reuse
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
extern_kernels.bmm(buf32, buf33, out=buf34)
buf35 = reinterpret_tensor(buf33, (4, 4, 1), (4, 1, 16), 0); del buf33 # reuse
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
triton_poi_fused_bmm_10.run(buf3, buf35, 16, grid=grid(16), stream=stream0)
buf36 = reinterpret_tensor(buf32, (4, 1, 4), (4, 16, 1), 0); del buf32 # reuse
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
triton_poi_fused_bmm_11.run(buf2, buf36, 16, grid=grid(16), stream=stream0)
buf37 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
extern_kernels.bmm(buf35, buf36, out=buf37)
del buf35
del buf36
buf38 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_6], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf34, buf38, 64, grid=grid(64), stream=stream0)
buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_7], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf37, buf39, 64, grid=grid(64), stream=stream0)
buf43 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf40 = reinterpret_tensor(buf43, (4, 4, 4), (32, 8, 1), 4) # alias
# Topologically Sorted Source Nodes: [v_update_3], Original ATen: [aten.cat]
triton_poi_fused_cat_12.run(buf30, buf38, buf3, buf40, 64, grid=grid(64), stream=stream0)
del buf30
buf45 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf41 = reinterpret_tensor(buf45, (4, 4, 4), (32, 8, 1), 4) # alias
# Topologically Sorted Source Nodes: [q_update_3], Original ATen: [aten.cat]
triton_poi_fused_cat_12.run(buf31, buf39, buf2, buf41, 64, grid=grid(64), stream=stream0)
del buf31
buf42 = reinterpret_tensor(buf43, (4, 4, 4), (32, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [cat_v], Original ATen: [aten.cat]
triton_poi_fused_cat_13.run(primals_1, buf42, 64, grid=grid(64), stream=stream0)
buf44 = reinterpret_tensor(buf45, (4, 4, 4), (32, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [cat_q], Original ATen: [aten.cat]
triton_poi_fused_cat_13.run(primals_2, buf44, 64, grid=grid(64), stream=stream0)
buf46 = reinterpret_tensor(buf39, (16, 4), (4, 1), 0); del buf39 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf43, (16, 8), (8, 1), 0), reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), out=buf46)
buf47 = reinterpret_tensor(buf46, (4, 4, 4), (16, 4, 1), 0); del buf46 # reuse
buf51 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_14.run(buf47, primals_8, buf51, 64, grid=grid(64), stream=stream0)
del primals_8
buf48 = reinterpret_tensor(buf38, (16, 4), (4, 1), 0); del buf38 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf45, (16, 8), (8, 1), 0), reinterpret_tensor(primals_9, (8, 4), (1, 8), 0), out=buf48)
buf49 = reinterpret_tensor(buf48, (4, 4, 4), (16, 4, 1), 0); del buf48 # reuse
buf50 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_14.run(buf49, primals_10, buf50, 64, grid=grid(64), stream=stream0)
del primals_10
return (buf47, buf49, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf6, buf9, reinterpret_tensor(buf3, (4, 1, 4, 1), (48, 48, 12, 1), 8), reinterpret_tensor(buf2, (4, 1, 4, 1), (48, 48, 12, 1), 8), buf14, buf17, reinterpret_tensor(buf3, (4, 1, 4, 1), (48, 48, 12, 1), 9), reinterpret_tensor(buf2, (4, 1, 4, 1), (48, 48, 12, 1), 9), buf24, buf27, reinterpret_tensor(buf3, (4, 1, 4, 1), (48, 48, 12, 1), 10), reinterpret_tensor(buf2, (4, 1, 4, 1), (48, 48, 12, 1), 10), buf34, buf37, reinterpret_tensor(buf3, (4, 1, 4, 1), (48, 48, 12, 1), 11), reinterpret_tensor(buf2, (4, 1, 4, 1), (48, 48, 12, 1), 11), reinterpret_tensor(buf43, (16, 8), (8, 1), 0), reinterpret_tensor(buf45, (16, 8), (8, 1), 0), buf50, primals_9, buf51, primals_7, reinterpret_tensor(buf3, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf2, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf2, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf3, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf3, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf2, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf2, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf3, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf3, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf2, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf2, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf3, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf3, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf2, (4, 4, 1), (48, 12, 1), 0), reinterpret_tensor(buf2, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf3, (4, 4, 1), (48, 12, 1), 0), buf52, buf53, )
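# Note on the return value of call(): buf47 and buf49 are the two module
# outputs (updated_v and updated_q); the remainder of the tuple is state
# saved for autograd -- reinterpreted per-head views of the key/query/value
# projections, the raw attention logits, and the ReLU masks.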
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
        self.activate = activate.lower() if activate is not None else None
        # compare against the lowercased name so 'ReLU', 'relu', etc. all match
        if self.activate == 'relu':
            self.ac_fn = nn.ReLU()
        elif self.activate == 'sigmoid':
            self.ac_fn = nn.Sigmoid()
        elif self.activate == 'tanh':
            self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class InterModalityUpdate(nn.Module):
"""
Inter-Modality Attention Flow
"""
def __init__(self, v_size, q_size, output_size, num_head, drop=0.0):
super(InterModalityUpdate, self).__init__()
self.v_size = v_size
self.q_size = q_size
self.output_size = output_size
self.num_head = num_head
self.v_lin = FCNet(v_size, output_size * 3, drop=drop, activate='relu')
self.q_lin = FCNet(q_size, output_size * 3, drop=drop, activate='relu')
self.v_output = FCNet(output_size + v_size, output_size, drop=drop,
activate='relu')
self.q_output = FCNet(output_size + q_size, output_size, drop=drop,
activate='relu')
def forward(self, v, q):
"""
:param v: eeg feature [batch, regions, feature_size]
:param q: eye feature [batch, regions, feature_size]
:return:
"""
        _batch_size, _num_obj = v.shape[0], v.shape[1]
v_tran = self.v_lin(v)
q_tran = self.q_lin(q)
        v_key, v_query, v_val = torch.split(v_tran, v_tran.size(2) // 3, dim=2)
        q_key, q_query, q_val = torch.split(q_tran, q_tran.size(2) // 3, dim=2)
        # split each of key/query/value into num_head slices along the feature dim
        v_key_set = torch.split(v_key, v_key.size(2) // self.num_head, dim=2)
        v_query_set = torch.split(v_query, v_query.size(2) // self.num_head, dim=2)
        v_val_set = torch.split(v_val, v_val.size(2) // self.num_head, dim=2)
        q_key_set = torch.split(q_key, q_key.size(2) // self.num_head, dim=2)
        q_query_set = torch.split(q_query, q_query.size(2) // self.num_head, dim=2)
        q_val_set = torch.split(q_val, q_val.size(2) // self.num_head, dim=2)
        for i in range(self.num_head):
            v_key_slice, v_query_slice, v_val_slice = v_key_set[i], v_query_set[i], v_val_set[i]
            q_key_slice, q_query_slice, q_val_slice = q_key_set[i], q_query_set[i], q_val_set[i]
            # scaled dot-product scores between the two modalities, divided
            # by sqrt of the per-head feature width
            q2v = v_query_slice @ q_key_slice.transpose(1, 2) / (self.output_size // self.num_head) ** 0.5
            v2q = q_query_slice @ v_key_slice.transpose(1, 2) / (self.output_size // self.num_head) ** 0.5
            interMAF_q2v = F.softmax(q2v, dim=2).unsqueeze(3)
            interMAF_v2q = F.softmax(v2q, dim=2).unsqueeze(3)
            # attention-weighted sum over the other modality's values;
            # per-head outputs are concatenated along the feature dim
            v_update = (interMAF_q2v * q_val_slice.unsqueeze(1)).sum(2) if i == 0 else torch.cat(
                (v_update, (interMAF_q2v * q_val_slice.unsqueeze(1)).sum(2)), dim=2)
            q_update = (interMAF_v2q * v_val_slice.unsqueeze(1)).sum(2) if i == 0 else torch.cat(
                (q_update, (interMAF_v2q * v_val_slice.unsqueeze(1)).sum(2)), dim=2)
cat_v = torch.cat((v, v_update), dim=2)
cat_q = torch.cat((q, q_update), dim=2)
updated_v = self.v_output(cat_v)
updated_q = self.q_output(cat_q)
return updated_v, updated_q
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'v_size': 4, 'q_size': 4, 'output_size': 4, 'num_head': 4}]
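# Minimal usage sketch (not part of the original module): it exercises
# InterModalityUpdate on the same toy shapes that get_inputs() produces,
# assuming CPU execution is acceptable at this size.
def _example_usage():
    model = InterModalityUpdate(v_size=4, q_size=4, output_size=4, num_head=4)
    v, q = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
    updated_v, updated_q = model(v, q)
    # both outputs keep the [batch, regions, output_size] layout: [4, 4, 4]
    return updated_v, updated_q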
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_bmm_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 12 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_bmm_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (5 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = xindex // 2
x2 = xindex // 8
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x3, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp5 / tmp11
tmp13 = tl.load(in_ptr1 + (8 + 48 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp6 / tmp11
tmp16 = tl.load(in_ptr1 + (20 + 48 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp8 / tmp11
tmp20 = tl.load(in_ptr1 + (32 + 48 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp10 / tmp11
tmp24 = tl.load(in_ptr1 + (44 + 48 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp32 = tl.load(in_ptr2 + 4 * x3, tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (1 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.load(in_ptr2 + (2 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr2 + (3 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp32 / tmp38
tmp40 = tl.load(in_ptr1 + (9 + 48 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp33 / tmp38
tmp43 = tl.load(in_ptr1 + (21 + 48 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp35 / tmp38
tmp47 = tl.load(in_ptr1 + (33 + 48 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp37 / tmp38
tmp51 = tl.load(in_ptr1 + (45 + 48 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp29, tmp53, tmp54)
tmp56 = tl.where(tmp4, tmp28, tmp55)
tl.store(out_ptr0 + x5, tmp56, xmask)
@triton.jit
def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (6 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = xindex // 3
x2 = xindex // 12
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 3, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x3, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (10 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (22 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (34 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (46 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + x5, tmp33, xmask)
@triton.jit
def triton_poi_fused_bmm_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (7 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x2 = xindex // 16
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (3 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x3, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (11 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (23 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (35 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (47 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + (x0 + 8 * x3), tmp33, xmask)
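# Hedged annotation (mine): my reading is that triton_poi_fused__softmax_3
# leaves un-normalised exp() values behind, so the cat kernel above divides
# by the row sum on the fly, takes the attention-weighted sum of head 3's
# value channel (offsets 11 + 12*j inside the 48-stride activations), and
# appends it to the three heads accumulated by triton_poi_fused_cat_9.
# Eager-mode sketch of the attention tail:
def _ref_attn_head(scores_exp, value_col):
    attn = scores_exp / scores_exp.sum(dim=-1, keepdim=True)
    return attn @ value_col  # (B, N, N) @ (B, N, 1) -> (B, N, 1)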
@triton.jit
def triton_poi_fused_cat_13(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
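# Hedged annotation (mine): the kernel above fuses the output FCNet's bias
# add with its ReLU and also materialises the `out <= 0` mask that autograd's
# threshold_backward will read. Eager-mode equivalent:
def _ref_bias_relu_with_mask(mm_result, bias):
    out = torch.relu(mm_result + bias)
    return out, out <= 0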
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12,), (1,))
assert_size_stride(primals_5, (12, 4), (4, 1))
assert_size_stride(primals_6, (12,), (1,))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 8), (8, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 12), (1, 4), 0), out=buf1)
del primals_5
buf2 = reinterpret_tensor(buf0, (4, 4, 12), (48, 12, 1), 0)
del buf0
buf53 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(192)](buf2,
primals_4, buf53, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf3 = reinterpret_tensor(buf1, (4, 4, 12), (48, 12, 1), 0)
del buf1
buf52 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(192)](buf3,
primals_6, buf52, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_bmm_1[grid(16)](buf2, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_bmm_2[grid(16)](buf3, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf4, buf5, out=buf6)
buf7 = reinterpret_tensor(buf5, (4, 4, 1), (4, 1, 16), 0)
del buf5
triton_poi_fused_bmm_1[grid(16)](buf3, buf7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf4, (4, 1, 4), (4, 16, 1), 0)
del buf4
triton_poi_fused_bmm_2[grid(16)](buf2, buf8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf7, buf8, out=buf9)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf6, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf9, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf8, (4, 4, 1), (4, 1, 16), 0)
del buf8
triton_poi_fused_bmm_4[grid(16)](buf2, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf7, (4, 1, 4), (4, 16, 1), 0)
del buf7
triton_poi_fused_bmm_5[grid(16)](buf3, buf13, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf12, buf13, out=buf14)
buf15 = reinterpret_tensor(buf13, (4, 4, 1), (4, 1, 16), 0)
del buf13
triton_poi_fused_bmm_4[grid(16)](buf3, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf16 = reinterpret_tensor(buf12, (4, 1, 4), (4, 16, 1), 0)
del buf12
triton_poi_fused_bmm_5[grid(16)](buf2, buf16, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf15, buf16, out=buf17)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf14, buf18, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf17, buf19, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_6[grid(32)](buf10, buf3, buf18, buf20, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf21 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_6[grid(32)](buf11, buf2, buf19, buf21, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf22 = reinterpret_tensor(buf16, (4, 4, 1), (4, 1, 16), 0)
del buf16
triton_poi_fused_bmm_7[grid(16)](buf2, buf22, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf23 = reinterpret_tensor(buf15, (4, 1, 4), (4, 16, 1), 0)
del buf15
triton_poi_fused_bmm_8[grid(16)](buf3, buf23, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf24 = buf19
del buf19
extern_kernels.bmm(buf22, buf23, out=buf24)
buf25 = reinterpret_tensor(buf23, (4, 4, 1), (4, 1, 16), 0)
del buf23
triton_poi_fused_bmm_7[grid(16)](buf3, buf25, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf26 = reinterpret_tensor(buf22, (4, 1, 4), (4, 16, 1), 0)
del buf22
triton_poi_fused_bmm_8[grid(16)](buf2, buf26, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf27 = buf11
del buf11
extern_kernels.bmm(buf25, buf26, out=buf27)
buf28 = buf18
del buf18
triton_poi_fused__softmax_3[grid(64)](buf24, buf28, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf29 = buf10
del buf10
triton_poi_fused__softmax_3[grid(64)](buf27, buf29, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
triton_poi_fused_cat_9[grid(48)](buf20, buf28, buf3, buf30, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del buf20
buf31 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
triton_poi_fused_cat_9[grid(48)](buf21, buf29, buf2, buf31, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del buf21
buf32 = reinterpret_tensor(buf26, (4, 4, 1), (4, 1, 16), 0)
del buf26
triton_poi_fused_bmm_10[grid(16)](buf2, buf32, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf33 = reinterpret_tensor(buf25, (4, 1, 4), (4, 16, 1), 0)
del buf25
triton_poi_fused_bmm_11[grid(16)](buf3, buf33, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf34 = buf29
del buf29
extern_kernels.bmm(buf32, buf33, out=buf34)
buf35 = reinterpret_tensor(buf33, (4, 4, 1), (4, 1, 16), 0)
del buf33
triton_poi_fused_bmm_10[grid(16)](buf3, buf35, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf36 = reinterpret_tensor(buf32, (4, 1, 4), (4, 16, 1), 0)
del buf32
triton_poi_fused_bmm_11[grid(16)](buf2, buf36, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf37 = buf28
del buf28
extern_kernels.bmm(buf35, buf36, out=buf37)
del buf35
del buf36
buf38 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf34, buf38, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf37, buf39, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf43 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf40 = reinterpret_tensor(buf43, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_12[grid(64)](buf30, buf38, buf3, buf40, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf30
buf45 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf41 = reinterpret_tensor(buf45, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_12[grid(64)](buf31, buf39, buf2, buf41, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf31
buf42 = reinterpret_tensor(buf43, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_cat_13[grid(64)](primals_1, buf42, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf44 = reinterpret_tensor(buf45, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_cat_13[grid(64)](primals_2, buf44, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf46 = reinterpret_tensor(buf39, (16, 4), (4, 1), 0)
del buf39
extern_kernels.mm(reinterpret_tensor(buf43, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), out=buf46)
buf47 = reinterpret_tensor(buf46, (4, 4, 4), (16, 4, 1), 0)
del buf46
buf51 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_14[grid(64)](buf47,
primals_8, buf51, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
buf48 = reinterpret_tensor(buf38, (16, 4), (4, 1), 0)
del buf38
extern_kernels.mm(reinterpret_tensor(buf45, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_9, (8, 4), (1, 8), 0), out=buf48)
buf49 = reinterpret_tensor(buf48, (4, 4, 4), (16, 4, 1), 0)
del buf48
buf50 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_14[grid(64)](buf49,
primals_10, buf50, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
return buf47, buf49, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), buf6, buf9, reinterpret_tensor(buf3, (4, 1, 4, 1), (48, 48, 12,
1), 8), reinterpret_tensor(buf2, (4, 1, 4, 1), (48, 48, 12, 1), 8
), buf14, buf17, reinterpret_tensor(buf3, (4, 1, 4, 1), (48, 48, 12,
1), 9), reinterpret_tensor(buf2, (4, 1, 4, 1), (48, 48, 12, 1), 9
), buf24, buf27, reinterpret_tensor(buf3, (4, 1, 4, 1), (48, 48, 12,
1), 10), reinterpret_tensor(buf2, (4, 1, 4, 1), (48, 48, 12, 1), 10
), buf34, buf37, reinterpret_tensor(buf3, (4, 1, 4, 1), (48, 48, 12,
1), 11), reinterpret_tensor(buf2, (4, 1, 4, 1), (48, 48, 12, 1), 11
), reinterpret_tensor(buf43, (16, 8), (8, 1), 0), reinterpret_tensor(
buf45, (16, 8), (8, 1), 0
), buf50, primals_9, buf51, primals_7, reinterpret_tensor(buf3, (4,
1, 4), (48, 1, 12), 7), reinterpret_tensor(buf2, (4, 4, 1), (48, 12,
1), 3), reinterpret_tensor(buf2, (4, 1, 4), (48, 1, 12), 7
), reinterpret_tensor(buf3, (4, 4, 1), (48, 12, 1), 3
), reinterpret_tensor(buf3, (4, 1, 4), (48, 1, 12), 6
), reinterpret_tensor(buf2, (4, 4, 1), (48, 12, 1), 2
), reinterpret_tensor(buf2, (4, 1, 4), (48, 1, 12), 6
), reinterpret_tensor(buf3, (4, 4, 1), (48, 12, 1), 2
), reinterpret_tensor(buf3, (4, 1, 4), (48, 1, 12), 5
), reinterpret_tensor(buf2, (4, 4, 1), (48, 12, 1), 1
), reinterpret_tensor(buf2, (4, 1, 4), (48, 1, 12), 5
), reinterpret_tensor(buf3, (4, 4, 1), (48, 12, 1), 1
), reinterpret_tensor(buf3, (4, 1, 4), (48, 1, 12), 4
), reinterpret_tensor(buf2, (4, 4, 1), (48, 12, 1), 0
), reinterpret_tensor(buf2, (4, 1, 4), (48, 1, 12), 4
), reinterpret_tensor(buf3, (4, 4, 1), (48, 12, 1), 0), buf52, buf53
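# Hedged annotation (mine): everything after buf47/buf49 in the return tuple
# above is saved purely for autograd -- per-head key/query/value views of
# buf2/buf3 plus the ReLU masks buf50-buf53 -- and is never surfaced by
# InterModalityUpdateNew.forward, which returns only output[0] and output[1].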
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
        # compare against the lowered value so mixed-case names like 'ReLU'
        # resolve instead of leaving self.ac_fn undefined
        if self.activate == 'relu':
            self.ac_fn = nn.ReLU()
        elif self.activate == 'sigmoid':
            self.ac_fn = nn.Sigmoid()
        elif self.activate == 'tanh':
            self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class InterModalityUpdateNew(nn.Module):
"""
Inter-Modality Attention Flow
"""
def __init__(self, v_size, q_size, output_size, num_head, drop=0.0):
super(InterModalityUpdateNew, self).__init__()
self.v_size = v_size
self.q_size = q_size
self.output_size = output_size
self.num_head = num_head
self.v_lin = FCNet(v_size, output_size * 3, drop=drop, activate='relu')
self.q_lin = FCNet(q_size, output_size * 3, drop=drop, activate='relu')
self.v_output = FCNet(output_size + v_size, output_size, drop=drop,
activate='relu')
self.q_output = FCNet(output_size + q_size, output_size, drop=drop,
activate='relu')
def forward(self, input_0, input_1):
primals_3 = self.v_lin.lin.weight
primals_4 = self.v_lin.lin.bias
primals_5 = self.q_lin.lin.weight
primals_6 = self.q_lin.lin.bias
primals_7 = self.v_output.lin.weight
primals_8 = self.v_output.lin.bias
primals_9 = self.q_output.lin.weight
primals_10 = self.q_output.lin.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1]
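# Hedged usage sketch (mine, assuming a CUDA device is available): the
# hard-coded strides and offsets in the kernels above only admit the tiny
# shapes they were traced with -- batch 4, sequence length 4,
# v_size = q_size = output_size = 4, num_head = 4.
if __name__ == '__main__':
    model = InterModalityUpdateNew(v_size=4, q_size=4, output_size=4,
        num_head=4).cuda()
    v = torch.rand(4, 4, 4, device='cuda')
    q = torch.rand(4, 4, 4, device='cuda')
    v_out, q_out = model(v, q)
    assert v_out.shape == (4, 4, 4) and q_out.shape == (4, 4, 4)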
| Ruiver/CTCNet | InterModalityUpdate | false | 17,913 | [
"Apache-2.0"
] | 6 | 539e55ec9fed06028379d35dfd5cd4074755ffd8 | https://github.com/Ruiver/CTCNet/tree/539e55ec9fed06028379d35dfd5cd4074755ffd8 |
C3D_mini | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/te/ctefgwbynjk523piokfxnmlt7ltptbebl7ioqxb443vmslji6v3f.py
# Topologically Sorted Source Nodes: [conv3d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv3d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[67108864],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 67108864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 262144) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/a6/ca6udqxnzv6oh56jp6vefxpgmtddbzcwbtoutkic73amljlruwrs.py
# Topologically Sorted Source Nodes: [conv3d_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv3d_1 => convolution_1
# x_2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[33554432],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 33554432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 65536) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/dc/cdcxto553jv4qcrkmwiqcyfcg3feks4drd2xyp2yk3ya44j4vevr.py
# Topologically Sorted Source Nodes: [conv3d_2, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv3d_2 => convolution_2
# x_4 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16777216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16384) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/3n/c3nomv7jysj7nl6dssvj4gkk7h4incnyonz23yenvc7lyrdmfmx3.py
# Topologically Sorted Source Nodes: [conv3d_4, x_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv3d_4 => convolution_4
# x_7 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_10, %primals_11, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4194304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 2048) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/aa/caaifhgb2nveosu6zxh6wf2egxkqpfsqbgw3cunop6ewbahu7ne7.py
# Topologically Sorted Source Nodes: [conv3d_6, x_10], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv3d_6 => convolution_6
# x_10 => relu_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_14, %primals_15, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yd/cyd4fpudpnjfrxnsuaab5hytmeetnbsryehezjwg4ztdnawbefuh.py
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_14 => relu_8
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_19), kwargs = {})
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/s7/cs7wieang3jk23uf24gm5if5fqcsdd5bm4tf23ewwjlwsrphrqcv.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_16 => relu_9
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_21), kwargs = {})
# %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_9, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_relu_threshold_backward_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_6(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3, 3), (81, 27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1))
assert_size_stride(primals_4, (128, 64, 3, 3, 3), (1728, 27, 9, 3, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (256, 128, 3, 3, 3), (3456, 27, 9, 3, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (256, 256, 3, 3, 3), (6912, 27, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (512, 256, 3, 3, 3), (6912, 27, 9, 3, 1))
assert_size_stride(primals_11, (512, ), (1, ))
assert_size_stride(primals_12, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_13, (512, ), (1, ))
assert_size_stride(primals_14, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_15, (512, ), (1, ))
assert_size_stride(primals_16, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_17, (512, ), (1, ))
assert_size_stride(primals_18, (512, 8192), (8192, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (2, 512), (512, 1))
assert_size_stride(primals_21, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64, 64), (16777216, 262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv3d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 67108864, grid=grid(67108864), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool3d_with_indices]
buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [1, 2, 2], [1, 2, 2])
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
# Topologically Sorted Source Nodes: [conv3d_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 128, 64, 32, 32), (8388608, 65536, 1024, 32, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv3d_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf6, primals_5, 33554432, grid=grid(33554432), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool3d_with_indices]
buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [1, 2, 2], [1, 2, 2])
buf8 = buf7[0]
buf9 = buf7[1]
del buf7
# Topologically Sorted Source Nodes: [conv3d_2], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 64, 16, 16), (4194304, 16384, 256, 16, 1))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [conv3d_2, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf11, primals_7, 16777216, grid=grid(16777216), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv3d_3], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 64, 16, 16), (4194304, 16384, 256, 16, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv3d_3, x_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf13, primals_9, 16777216, grid=grid(16777216), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.max_pool3d_with_indices]
buf14 = torch.ops.aten.max_pool3d_with_indices.default(buf13, [2, 2, 2], [2, 2, 2])
buf15 = buf14[0]
buf16 = buf14[1]
del buf14
# Topologically Sorted Source Nodes: [conv3d_4], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 512, 32, 8, 8), (1048576, 2048, 64, 8, 1))
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [conv3d_4, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf18, primals_11, 4194304, grid=grid(4194304), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv3d_5], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 512, 32, 8, 8), (1048576, 2048, 64, 8, 1))
buf20 = buf19; del buf19 # reuse
# Topologically Sorted Source Nodes: [conv3d_5, x_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf20, primals_13, 4194304, grid=grid(4194304), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.max_pool3d_with_indices]
buf21 = torch.ops.aten.max_pool3d_with_indices.default(buf20, [2, 2, 2], [2, 2, 2])
buf22 = buf21[0]
buf23 = buf21[1]
del buf21
# Topologically Sorted Source Nodes: [conv3d_6], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 16, 4, 4), (131072, 256, 16, 4, 1))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [conv3d_6, x_10], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf25, primals_15, 524288, grid=grid(524288), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [conv3d_7], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, primals_16, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 512, 16, 4, 4), (131072, 256, 16, 4, 1))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [conv3d_7, x_11], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf27, primals_17, 524288, grid=grid(524288), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.max_pool3d_with_indices]
buf28 = torch.ops.aten.max_pool3d_with_indices.default(buf27, [2, 2, 2], [2, 2, 2], [0, 1, 1])
buf29 = buf28[0]
buf30 = buf28[1]
del buf28
buf31 = empty_strided_cuda((18, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf29, (18, 8192), (8192, 1), 0), reinterpret_tensor(primals_18, (8192, 512), (1, 8192), 0), out=buf31)
buf32 = buf31; del buf31 # reuse
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf32, primals_19, 9216, grid=grid(9216), stream=stream0)
del primals_19
buf33 = empty_strided_cuda((18, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf32, reinterpret_tensor(primals_20, (512, 2), (1, 512), 0), out=buf33)
buf34 = buf33; del buf33 # reuse
buf35 = empty_strided_cuda((18, 2), (2, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_6.run(buf34, primals_21, buf35, 36, grid=grid(36), stream=stream0)
del primals_21
return (buf34, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, buf1, buf3, buf4, buf6, buf8, buf9, buf11, buf13, buf15, buf16, buf18, buf20, buf22, buf23, buf25, buf27, buf30, reinterpret_tensor(buf29, (18, 8192), (8192, 1), 0), buf32, buf35, primals_20, primals_18, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 3, 3, 3), (81, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 64, 3, 3, 3), (1728, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 128, 3, 3, 3), (3456, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 256, 3, 3, 3), (6912, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((512, 256, 3, 3, 3), (6912, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((512, 512, 3, 3, 3), (13824, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((512, 512, 3, 3, 3), (13824, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((512, 512, 3, 3, 3), (13824, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 8192), (8192, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((2, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class C3D_mini(nn.Module):
""" The C3D_mini network """
def __init__(self, num_classes=2, pretrained=False):
super(C3D_mini, self).__init__()
self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2),
padding=(0, 1, 1))
self.fc6 = nn.Linear(8192, 512)
self.fc7 = nn.Linear(512, num_classes)
self.dropout = nn.Dropout(p=0.5)
self.relu = nn.ReLU()
self.__init_weight()
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.pool1(x)
x = self.relu(self.conv2(x))
x = self.pool2(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool3(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
x = self.pool4(x)
x = self.relu(self.conv5a(x))
x = self.relu(self.conv5b(x))
x = self.pool5(x)
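        # Shape note (my annotation, for the (4, 3, 64, 64, 64) inputs in
        # get_inputs): pool5 emits (4, 512, 8, 3, 3) = 147456 values, so the
        # view below yields 18 rows of 8192 and mixes features across batch
        # items instead of giving one row per sample.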
x = x.view(-1, 8192)
x = self.relu(self.fc6(x))
x = self.dropout(x)
x = self.relu(self.fc7(x))
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 262144 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 65536 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16384 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 2048 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
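def _ref_bias_relu(x, bias):
    # Hedged annotation (mine): kernels 0-4 above share one fused bias-add +
    # ReLU epilogue, applied in place after the extern convolutions; they
    # differ only in the hard-coded stride used to broadcast the per-channel
    # bias across N, D, H, W.
    return torch.relu(x + bias.view(1, -1, 1, 1, 1))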
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_6(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3, 3), (81, 27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64, 64), (786432, 262144, 4096,
64, 1))
assert_size_stride(primals_4, (128, 64, 3, 3, 3), (1728, 27, 9, 3, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (256, 128, 3, 3, 3), (3456, 27, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3, 3), (6912, 27, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (512, 256, 3, 3, 3), (6912, 27, 9, 3, 1))
assert_size_stride(primals_11, (512,), (1,))
assert_size_stride(primals_12, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_13, (512,), (1,))
assert_size_stride(primals_14, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_15, (512,), (1,))
assert_size_stride(primals_16, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (512, 8192), (8192, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (2, 512), (512, 1))
assert_size_stride(primals_21, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64, 64), (16777216, 262144,
4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(67108864)](buf1, primals_2,
67108864, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [1, 2,
2], [1, 2, 2])
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 128, 64, 32, 32), (8388608, 65536,
1024, 32, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_1[grid(33554432)](buf6, primals_5,
33554432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [1, 2,
2], [1, 2, 2])
buf8 = buf7[0]
buf9 = buf7[1]
del buf7
buf10 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1, 1
), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 64, 16, 16), (4194304, 16384,
256, 16, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_2[grid(16777216)](buf11,
primals_7, 16777216, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 64, 16, 16), (4194304, 16384,
256, 16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_2[grid(16777216)](buf13,
primals_9, 16777216, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf14 = torch.ops.aten.max_pool3d_with_indices.default(buf13, [2, 2,
2], [2, 2, 2])
buf15 = buf14[0]
buf16 = buf14[1]
del buf14
buf17 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 512, 32, 8, 8), (1048576, 2048, 64, 8, 1)
)
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_3[grid(4194304)](buf18,
primals_11, 4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 512, 32, 8, 8), (1048576, 2048, 64, 8, 1)
)
buf20 = buf19
del buf19
triton_poi_fused_convolution_relu_3[grid(4194304)](buf20,
primals_13, 4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf21 = torch.ops.aten.max_pool3d_with_indices.default(buf20, [2, 2,
2], [2, 2, 2])
buf22 = buf21[0]
buf23 = buf21[1]
del buf21
buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 16, 4, 4), (131072, 256, 16, 4, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_4[grid(524288)](buf25, primals_15,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf26 = extern_kernels.convolution(buf25, primals_16, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 512, 16, 4, 4), (131072, 256, 16, 4, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_4[grid(524288)](buf27, primals_17,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf28 = torch.ops.aten.max_pool3d_with_indices.default(buf27, [2, 2,
2], [2, 2, 2], [0, 1, 1])
buf29 = buf28[0]
buf30 = buf28[1]
del buf28
buf31 = empty_strided_cuda((18, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf29, (18, 8192), (8192, 1),
0), reinterpret_tensor(primals_18, (8192, 512), (1, 8192), 0),
out=buf31)
buf32 = buf31
del buf31
triton_poi_fused_relu_5[grid(9216)](buf32, primals_19, 9216, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_19
buf33 = empty_strided_cuda((18, 2), (2, 1), torch.float32)
extern_kernels.mm(buf32, reinterpret_tensor(primals_20, (512, 2), (
1, 512), 0), out=buf33)
buf34 = buf33
del buf33
buf35 = empty_strided_cuda((18, 2), (2, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_6[grid(36)](buf34,
primals_21, buf35, 36, XBLOCK=64, num_warps=1, num_stages=1)
del primals_21
return (buf34, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, buf1, buf3, buf4,
buf6, buf8, buf9, buf11, buf13, buf15, buf16, buf18, buf20, buf22,
buf23, buf25, buf27, buf30, reinterpret_tensor(buf29, (18, 8192), (
8192, 1), 0), buf32, buf35, primals_20, primals_18)
class C3D_miniNew(nn.Module):
""" The C3D_mini network """
def __init__(self, num_classes=2, pretrained=False):
super(C3D_miniNew, self).__init__()
self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2),
padding=(0, 1, 1))
self.fc6 = nn.Linear(8192, 512)
self.fc7 = nn.Linear(512, num_classes)
self.dropout = nn.Dropout(p=0.5)
self.relu = nn.ReLU()
self.__init_weight()
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3a.weight
primals_7 = self.conv3a.bias
primals_8 = self.conv3b.weight
primals_9 = self.conv3b.bias
primals_10 = self.conv4a.weight
primals_11 = self.conv4a.bias
primals_12 = self.conv4b.weight
primals_13 = self.conv4b.bias
primals_14 = self.conv5a.weight
primals_15 = self.conv5a.bias
primals_16 = self.conv5b.weight
primals_17 = self.conv5b.bias
primals_18 = self.fc6.weight
primals_19 = self.fc6.bias
primals_20 = self.fc7.weight
primals_21 = self.fc7.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0]
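# Hedged usage sketch (added for illustration; not from the original repo).
# The expected input shape is inferred from the buffer asserts in call():
# buf12 of (4, 256, 64, 16, 16) after conv3b implies a (4, 3, 64, 64, 64)
# clip, i.e. two (1, 2, 2) poolings from 64x64 frames over 64 time steps.
# Treat that shape as an assumption rather than a documented contract.
def _example_c3d_mini_usage():
    model = C3D_miniNew(num_classes=2).cuda()
    clip = torch.rand(4, 3, 64, 64, 64, device='cuda')  # (N, C, T, H, W)
    out = model(clip)  # runs the fused Triton forward via call()
    return out.shape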
| Ontheway361/C3D | C3D_mini | false | 17,914 | ["MIT"] | 7 | 7aa5364d8c0c6bddc17b1b8939b198fe66e282ca | https://github.com/Ontheway361/C3D/tree/7aa5364d8c0c6bddc17b1b8939b198fe66e282ca |
InnerProd | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ju/cjuillrts46dwht2m5m65xuggokggi6b2orz7q3ihbfb5zto7q47.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_3), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/bn/cbntjutwqfdhbtihfocfe5m7ui3zmlpqsqgyx5iwg3fenitfwyaf.py
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# z_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %primals_4), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, primals_3, buf0, 16, grid=grid(16), stream=stream0)
del primals_3
buf1 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, bmm], Original ATen: [aten.mul, aten.bmm]
extern_kernels.bmm(buf0, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), out=buf1)
del buf0
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf2, primals_4, 64, grid=grid(64), stream=stream0)
del primals_4
return (buf2, primals_2, reinterpret_tensor(primals_1, (4, 16, 4), (64, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class InnerProd(nn.Module):
def __init__(self, fc_dim):
super(InnerProd, self).__init__()
self.scale = nn.Parameter(torch.ones(fc_dim))
self.bias = nn.Parameter(torch.zeros(1))
def forward(self, feat_img, feat_sound):
sound_size = feat_sound.size()
B, C = sound_size[0], sound_size[1]
feat_img = feat_img.view(B, 1, C)
z = torch.bmm(feat_img * self.scale, feat_sound.view(B, C, -1)).view(B,
1, *sound_size[2:])
z = z + self.bias
return z
def forward_nosum(self, feat_img, feat_sound):
B, C, _H, _W = feat_sound.size()
feat_img = feat_img.view(B, C)
z = (feat_img * self.scale).view(B, C, 1, 1) * feat_sound
z = z + self.bias
return z
def forward_pixelwise(self, feats_img, feat_sound):
B, C, HI, WI = feats_img.size()
B, C, HS, WS = feat_sound.size()
feats_img = feats_img.view(B, C, HI * WI)
feats_img = feats_img.transpose(1, 2)
feat_sound = feat_sound.view(B, C, HS * WS)
z = torch.bmm(feats_img * self.scale, feat_sound).view(B, HI, WI,
HS, WS)
z = z + self.bias
return z
def get_inputs():
return [torch.rand([4, 1, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'fc_dim': 4}]
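# Hedged usage sketch (added for illustration; not from the source repo).
# InnerProd scores a pooled image feature against every spatial location of
# a sound feature map, as used in audio-visual source separation; the shapes
# below mirror get_inputs().
def _example_inner_prod_usage():
    net = InnerProd(fc_dim=4)
    feat_img = torch.rand(4, 1, 4)  # (B, 1, C) pooled visual feature
    feat_sound = torch.rand(4, 4, 4, 4)  # (B, C, H, W) sound feature map
    z = net(feat_img, feat_sound)  # (B, 1, H, W) similarity map plus bias
    return z.shape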
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
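# Note (added comment): this kernel broadcasts the single-element bias across
# all 64 outputs and adds it in place, fusing the `z + self.bias` step of
# InnerProd.forward into one elementwise pass over the bmm result.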
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, primals_3, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
extern_kernels.bmm(buf0, reinterpret_tensor(primals_1, (4, 4, 16),
(64, 16, 1), 0), out=buf1)
del buf0
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(64)](buf2, primals_4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_4
return buf2, primals_2, reinterpret_tensor(primals_1, (4, 16, 4), (64,
1, 16), 0)
class InnerProdNew(nn.Module):
def __init__(self, fc_dim):
super(InnerProdNew, self).__init__()
self.scale = nn.Parameter(torch.ones(fc_dim))
self.bias = nn.Parameter(torch.zeros(1))
def forward_nosum(self, feat_img, feat_sound):
B, C, _H, _W = feat_sound.size()
feat_img = feat_img.view(B, C)
z = (feat_img * self.scale).view(B, C, 1, 1) * feat_sound
z = z + self.bias
return z
def forward_pixelwise(self, feats_img, feat_sound):
B, C, HI, WI = feats_img.size()
B, C, HS, WS = feat_sound.size()
feats_img = feats_img.view(B, C, HI * WI)
feats_img = feats_img.transpose(1, 2)
feat_sound = feat_sound.view(B, C, HS * WS)
z = torch.bmm(feats_img * self.scale, feat_sound).view(B, HI, WI,
HS, WS)
z = z + self.bias
return z
def forward(self, input_0, input_1):
primals_3 = self.scale
primals_4 = self.bias
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| SheldonTsui/Minus-Plus-Network | InnerProd | false | 17,915 | ["Apache-2.0"] | 5 | 7aa281b17f637a9f168aaf250039e560027a3817 | https://github.com/SheldonTsui/Minus-Plus-Network/tree/7aa281b17f637a9f168aaf250039e560027a3817 |
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ql/cqlq47koaaqw5tflq2wvx7vmgob6ibw2kevxe6xtlw2473y5muvu.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/o5/co5j7xakfhhc2bxzzoma6jpl2aqdebizbpgpkpux27nncuhfh6dp.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = (xindex // 1200)
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask)
tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask)
''', device_str='cuda')
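# Note (added comment): the two ReLU kernels above fuse the linear-layer bias
# add with the activation and also store a boolean mask (output <= 0). The
# masks (buf7 and buf8 in call() below) are returned so autograd can replay
# the ReLU gradient (`threshold_backward`) without recomputing the forward.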
# kernel path: runs/run_shard_2/inductor_cache/we/cwexc5tt6vtp2fs2jftpsj4axfai7gjl6pufpgxheknpm3cz342w.py
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.relu, aten.view]
# Source node to ATen node mapping:
# out_3 => relu_1
# out_4 => view_4
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %view_4 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 300]), kwargs = {})
triton_poi_fused_relu_view_2 = async_compile.triton('triton_poi_fused_relu_view_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = (xindex // 300)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/hz/chzzpinm3r4gylnyldvam65snklygw2ep5w7f7lblmbge5sot3jj.py
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# out_5 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300, ), (1, ))
assert_size_stride(primals_6, (4, 300), (300, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 25600, grid=grid(25600), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_5, buf3, buf7, 19200, grid=grid(19200), stream=stream0)
del primals_5
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_2.run(buf3, buf4, 19200, grid=grid(19200), stream=stream0)
del buf3
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 4), (1, 300), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_3.run(buf6, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 400), (400, 1), 0), buf4, buf6, primals_6, buf7, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((400, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 300), (300, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class Actor(nn.Module):
def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300,
init_w=0.003):
super(Actor, self).__init__()
self.fc1 = nn.Linear(nb_states, hidden1)
self.fc2 = nn.Linear(hidden1, hidden2)
self.fc3 = nn.Linear(hidden2, nb_actions)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.fc1(x)
out = self.relu(out)
out = self.fc2(out)
out = self.relu(out)
out = self.fc3(out)
out = self.sigmoid(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nb_states': 4, 'nb_actions': 4}]
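# Hedged usage sketch (added for illustration; not from the source repo).
# The actor maps a state vector to per-action outputs squashed into (0, 1)
# by the final sigmoid, as in DDPG-style agents; the 4-D input here simply
# exercises nn.Linear's broadcasting over leading dimensions.
def _example_actor_usage():
    actor = Actor(nb_states=4, nb_actions=4)
    state = torch.rand(4, 4, 4, 4)
    action = actor(state)  # same leading shape, last dim == nb_actions
    return action.shape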
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
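# Note (added comment): this kernel fuses the fc3 bias add with the sigmoid
# activation, writing the result in place over the matmul output so no extra
# elementwise pass or temporary buffer is needed.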
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300,), (1,))
assert_size_stride(primals_6, (4, 300), (300, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1,
primals_2, buf8, 25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2,
primals_5, buf3, buf7, 19200, XBLOCK=128, num_warps=4, num_stages=1
)
del primals_5
buf4 = buf2
del buf2
triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200, XBLOCK
=256, num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 4), (1,
300), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_sigmoid_3[grid(256)](buf6, primals_7, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 400), (400, 1), 0
), buf4, buf6, primals_6, buf7, primals_4, buf8
class ActorNew(nn.Module):
def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300,
init_w=0.003):
super(ActorNew, self).__init__()
self.fc1 = nn.Linear(nb_states, hidden1)
self.fc2 = nn.Linear(hidden1, hidden2)
self.fc3 = nn.Linear(hidden2, nb_actions)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| Sharpiless/HAQ-for-Mobilenetv3-Quantization | Actor | false | 17,916 | ["MIT"] | 5 | 76b7d98471adb666ad140abd2518bce6f0de3cfa | https://github.com/Sharpiless/HAQ-for-Mobilenetv3-Quantization/tree/76b7d98471adb666ad140abd2518bce6f0de3cfa |
FeedForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/33/c33qod4eywk7mffyetsgrrnqtw57afbqr2uzg57m7hv3zbt7jxfu.py
# Topologically Sorted Source Nodes: [pow_1, mul, add, mul_1, tanh, add_1, cdf, x_1], Original ATen: [aten.pow, aten.mul, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# cdf => mul_2
# mul => mul
# mul_1 => mul_1
# pow_1 => pow_1
# tanh => tanh
# x_1 => mul_3
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7978845608028654), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %mul_2), kwargs = {})
triton_poi_fused_add_mul_pow_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp0
tmp3 = 0.044715
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp6 = 0.7978845608028654
tmp7 = tmp5 * tmp6
tmp8 = libdevice.tanh(tmp7)
tmp9 = 1.0
tmp10 = tmp8 + tmp9
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp0 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, mul, add, mul_1, tanh, add_1, cdf, x_1], Original ATen: [aten.pow, aten.mul, aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
def activation(act_type='swish'):
if act_type == 'swish':
act = swish()
return act
else:
act = nn.ReLU(inplace=True)
return act
class swish(nn.Module):
def __init__(self):
super(swish, self).__init__()
def forward(self, x):
x = x * torch.sigmoid(x)
return x
class GELU(nn.Module):
"""
Gaussian Error Linear Units, based on
`"Gaussian Error Linear Units (GELUs)" <https://arxiv.org/abs/1606.08415>`
"""
def __init__(self, approximate=True):
super(GELU, self).__init__()
self.approximate = approximate
def forward(self, x):
if self.approximate:
cdf = 0.5 * (1.0 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
return x * cdf
else:
return x * (torch.erf(x / math.sqrt(2)) + 1) / 2
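# Hedged check (added for illustration): the approximate branch computes
# 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))), which tracks the
# exact erf-based GELU closely (max absolute error on the order of 1e-3 or
# below over typical activation ranges).
def _check_gelu_approximation(n=1001):
    x = torch.linspace(-5, 5, n)
    approx = GELU(approximate=True)(x)
    exact = GELU(approximate=False)(x)
    return (approx - exact).abs().max()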
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout=0.1, activation=GELU):
super(FeedForward, self).__init__()
self.mlp1 = nn.Linear(dim, hidden_dim)
self.act = activation()
self.mlp2 = nn.Linear(hidden_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.mlp1(x)
x = self.act(x)
x = self.dropout(x)
x = self.mlp2(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'hidden_dim': 4}]
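# Hedged usage sketch (added for illustration; not from the source repo).
# FeedForward is the standard transformer MLP block,
# Linear -> GELU -> Dropout -> Linear -> Dropout, applied over the last
# dimension of an input of any rank.
def _example_feed_forward_usage():
    ff = FeedForward(dim=4, hidden_dim=4)
    x = torch.rand(4, 4, 4, 4)
    y = ff(x)  # same shape as x
    return y.shape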
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp0
tmp3 = 0.044715
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp6 = 0.7978845608028654
tmp7 = tmp5 * tmp6
tmp8 = libdevice.tanh(tmp7)
tmp9 = 1.0
tmp10 = tmp8 + tmp9
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp0 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
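# Note (added comment): tmp1..tmp13 above spell out the tanh-approximated
# GELU elementwise, x * 0.5 * (1 + tanh(0.7978845608 * (x + 0.044715 *
# x**3))), fused into a single pass instead of several separate pointwise ops.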
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
def activation(act_type='swish'):
if act_type == 'swish':
act = swish()
return act
else:
act = nn.ReLU(inplace=True)
return act
class swish(nn.Module):
def __init__(self):
super(swish, self).__init__()
def forward(self, x):
x = x * torch.sigmoid(x)
return x
class GELU(nn.Module):
"""
Gaussian Error Linear Units, based on
`"Gaussian Error Linear Units (GELUs)" <https://arxiv.org/abs/1606.08415>`
"""
def __init__(self, approximate=True):
super(GELU, self).__init__()
self.approximate = approximate
def forward(self, x):
if self.approximate:
cdf = 0.5 * (1.0 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
return x * cdf
else:
return x * (torch.erf(x / math.sqrt(2)) + 1) / 2
class FeedForwardNew(nn.Module):
def __init__(self, dim, hidden_dim, dropout=0.1, activation=GELU):
super(FeedForwardNew, self).__init__()
self.mlp1 = nn.Linear(dim, hidden_dim)
self.act = activation()
self.mlp2 = nn.Linear(hidden_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, input_0):
primals_1 = self.mlp1.weight
primals_2 = self.mlp1.bias
primals_4 = self.mlp2.weight
primals_5 = self.mlp2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Sense-GVT/BigPretrain | FeedForward | false | 17,917 | ["Apache-2.0"] | 8 | d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e | https://github.com/Sense-GVT/BigPretrain/tree/d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e |
SIMPA | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ue/cuehlclauwuiv76uzvfjk3bhdmsnqcteo3q7enlhztfdeoev7avt.py
# Topologically Sorted Source Nodes: [feat_n_1, mul_2, feat_n_2, mul_3, feat_n_3, mul_4, feat_n_4, mul_6, feat_n_5, mul_7, feat_n_6, mul_8, feat_n_7, mul_10, feat_n_8, mul_11, feat_n_9, mul_13, feat_n_10], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
# feat_n_1 => mul_1
# feat_n_10 => add_12
# feat_n_2 => add_1
# feat_n_3 => add_2
# feat_n_4 => add_3
# feat_n_5 => add_5
# feat_n_6 => add_6
# feat_n_7 => add_7
# feat_n_8 => add_9
# feat_n_9 => add_10
# mul_10 => mul_10
# mul_11 => mul_11
# mul_13 => mul_13
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, %view_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, %view_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, %view_8), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, %view_11), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_6, %view_20), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %mul_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_7, %view_23), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %mul_7), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, %view_26), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_8), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_10, %view_35), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mul_10), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_11, %view_38), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, %mul_11), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_13, %view_47), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %mul_13), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 64
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr0 + (1))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp6 = tl.load(in_ptr2 + (x0), xmask)
tmp9 = tl.load(in_ptr0 + (2))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp11 = tl.load(in_ptr3 + (x0), xmask)
tmp14 = tl.load(in_ptr0 + (3))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp16 = tl.load(in_ptr4 + (x0), xmask)
tmp19 = tl.load(in_ptr0 + (4))
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp21 = tl.load(in_ptr5 + (x0), xmask)
tmp24 = tl.load(in_ptr0 + (5))
tmp25 = tl.broadcast_to(tmp24, [XBLOCK])
tmp26 = tl.load(in_ptr6 + (x0), xmask)
tmp29 = tl.load(in_ptr0 + (6))
tmp30 = tl.broadcast_to(tmp29, [XBLOCK])
tmp31 = tl.load(in_ptr7 + (x0), xmask)
tmp34 = tl.load(in_ptr0 + (7))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp36 = tl.load(in_ptr8 + (x0), xmask)
tmp39 = tl.load(in_ptr0 + (8))
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp41 = tl.load(in_ptr9 + (x0), xmask)
tmp44 = tl.load(in_ptr0 + (9))
tmp45 = tl.broadcast_to(tmp44, [XBLOCK])
tmp46 = tl.load(in_ptr10 + (x0), xmask)
tmp3 = tmp1 * tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp22 = tmp20 * tmp21
tmp23 = tmp18 + tmp22
tmp27 = tmp25 * tmp26
tmp28 = tmp23 + tmp27
tmp32 = tmp30 * tmp31
tmp33 = tmp28 + tmp32
tmp37 = tmp35 * tmp36
tmp38 = tmp33 + tmp37
tmp42 = tmp40 * tmp41
tmp43 = tmp38 + tmp42
tmp47 = tmp45 * tmp46
tmp48 = tmp43 + tmp47
tl.store(out_ptr0 + (x1 + (128*x2)), tmp48, xmask)
''', device_str='cuda')
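# Note (added comment): this kernel accumulates a weighted sum of ten input
# feature tensors, sum_i w_i * feat_i, reading the ten scalar mixing weights
# from a single small buffer (in_ptr0). Each 64-element result row is stored
# with stride 128, leaving room for a second 64-wide block, likely the
# positive-path features that SIMPA concatenates alongside the negative path.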
# kernel path: runs/run_shard_2/inductor_cache/ge/cgecbbl6j65t3ghaemdub7fg6jztg4yncnbnio2zltm5ji6f3qnx.py
# Topologically Sorted Source Nodes: [feat_p, mul_5, feat_p_1, mul_9, feat_p_2, mul_12, feat_p_3, mul_14, feat_p_4], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# feat_p => mul
# feat_p_1 => add_4
# feat_p_2 => add_8
# feat_p_3 => add_11
# feat_p_4 => add_13
# mul_12 => mul_12
# mul_14 => mul_14
# mul_5 => mul_5
# mul_9 => mul_9
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %primals_2), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_5, %view_14), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_5), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_9, %view_29), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_9), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_12, %view_41), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %mul_12), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_14, %view_50), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %mul_14), kwargs = {})
triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr0 + (1))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp6 = tl.load(in_ptr2 + (x2), xmask)
tmp9 = tl.load(in_ptr0 + (2))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp11 = tl.load(in_ptr3 + (x2), xmask)
tmp14 = tl.load(in_ptr0 + (3))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp16 = tl.load(in_ptr4 + (x2), xmask)
tmp19 = tl.load(in_ptr0 + (4))
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp21 = tl.load(in_ptr5 + (x2), xmask)
tmp3 = tmp1 * tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp22 = tmp20 * tmp21
tmp23 = tmp18 + tmp22
tl.store(out_ptr0 + (x0 + (128*x1)), tmp23, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (5, 1), (1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (10, 1), (1, 1))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (16, 4, 4), (16, 4, 1), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), buf0, out=buf1)
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), buf1, out=buf2)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), buf2, out=buf3)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_p_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_aux_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (16, 4, 4), (16, 4, 1), 0), out=buf5)
del primals_3
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_4], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4, 1), 0), buf5, out=buf6)
buf11 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_aux_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), buf5, out=buf11)
buf12 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [curr_n_7], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4, 1), 0), buf11, out=buf12)
buf13 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_8], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), buf12, out=buf13)
buf16 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_aux_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), buf11, out=buf16)
buf17 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [curr_n_9], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4, 1), 0), buf16, out=buf17)
del primals_4
buf8 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [curr_n_5], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), buf6, out=buf8)
buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_6], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), buf8, out=buf9)
buf21 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
buf20 = reinterpret_tensor(buf21, (4, 4, 4, 4), (128, 16, 4, 1), 64) # alias
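        # Note: torch.cat([feat_p, feat_n], dim=1) is realized copy-free here — buf19 (offset 0)
        # and buf20 (offset 64) are aliased views into buf21, so the two fused kernels below
        # write the positive and negative halves of the output directly in place.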
# Topologically Sorted Source Nodes: [feat_n_1, mul_2, feat_n_2, mul_3, feat_n_3, mul_4, feat_n_4, mul_6, feat_n_5, mul_7, feat_n_6, mul_8, feat_n_7, mul_10, feat_n_8, mul_11, feat_n_9, mul_13, feat_n_10], Original ATen: [aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(primals_5, buf0, buf1, buf2, buf3, buf6, buf8, buf9, buf12, buf13, buf17, buf20, 256, grid=grid(256), stream=stream0)
del primals_5
buf10 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_p_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), buf4, out=buf10)
buf15 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_p_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), buf10, out=buf15)
buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_p_4], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), buf15, out=buf18)
del primals_6
buf19 = reinterpret_tensor(buf21, (4, 4, 4, 4), (128, 16, 4, 1), 0) # alias
# Topologically Sorted Source Nodes: [feat_p, mul_5, feat_p_1, mul_9, feat_p_2, mul_12, feat_p_3, mul_14, feat_p_4], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_1.run(primals_1, primals_2, buf4, buf10, buf15, buf18, buf19, 256, grid=grid(256), stream=stream0)
del primals_1
return (buf21, primals_2, buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf9, buf10, buf12, buf13, buf15, buf17, buf18, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((5, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((10, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Optional
from typing import Tuple
import torch.nn as nn
from torch.nn.parameter import Parameter
from typing import Union
class SIMPA(nn.Module):
"""The signed mixed-path aggregation model.
Args:
hop (int): Number of hops to consider.
directed (bool, optional): Whether the input network is directed or not. (default: :obj:`False`)
"""
def __init__(self, hop: 'int', directed: 'bool'=False):
super(SIMPA, self).__init__()
self._hop_p = hop + 1
self._hop_n = int((1 + hop) * hop / 2)
self._undirected = not directed
if self._undirected:
self._w_p = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_n = Parameter(torch.FloatTensor(self._hop_n, 1))
self._reset_parameters_undirected()
else:
self._w_sp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_sn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._w_tp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_tn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._reset_parameters_directed()
def _reset_parameters_undirected(self):
self._w_p.data.fill_(1.0)
self._w_n.data.fill_(1.0)
def _reset_parameters_directed(self):
self._w_sp.data.fill_(1.0)
self._w_sn.data.fill_(1.0)
self._w_tp.data.fill_(1.0)
self._w_tn.data.fill_(1.0)
def forward(self, A_p:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', A_n:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', x_p:
'torch.FloatTensor', x_n: 'torch.FloatTensor', x_pt:
'Optional[torch.FloatTensor]'=None, x_nt:
'Optional[torch.FloatTensor]'=None, A_pt:
'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'=None,
A_nt: 'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'
=None) ->Tuple[torch.FloatTensor, torch.FloatTensor, torch.
LongTensor, torch.FloatTensor]:
"""
Making a forward pass of SIMPA.
Arg types:
* **A_p** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized positive part of the adjacency matrix.
* **A_n** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized negative part of the adjacency matrix.
        * **x_p** (PyTorch FloatTensor) - Source positive hidden representations.
        * **x_n** (PyTorch FloatTensor) - Source negative hidden representations.
* **x_pt** (PyTorch FloatTensor, optional) - Target positive hidden representations. Default: None.
* **x_nt** (PyTorch FloatTensor, optional) - Target negative hidden representations. Default: None.
* **A_pt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
positive part of the adjacency matrix. Default: None.
* **A_nt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
negative part of the adjacency matrix. Default: None.
Return types:
* **feat** (PyTorch FloatTensor) - Embedding matrix, with shape (num_nodes, 2*input_dim) for undirected graphs
and (num_nodes, 4*input_dim) for directed graphs.
"""
if self._undirected:
feat_p = self._w_p[0] * x_p
feat_n = torch.zeros_like(feat_p)
curr_p = x_p.clone()
curr_n_aux = x_n.clone()
j = 0
for h in range(0, self._hop_p):
if h > 0:
curr_p = torch.matmul(A_p, curr_p)
curr_n_aux = torch.matmul(A_p, curr_n_aux)
feat_p += self._w_p[h] * curr_p
if h != self._hop_p - 1:
curr_n = torch.matmul(A_n, curr_n_aux)
feat_n += self._w_n[j] * curr_n
j += 1
for _ in range(self._hop_p - 2 - h):
curr_n = torch.matmul(A_p, curr_n)
feat_n += self._w_n[j] * curr_n
j += 1
feat = torch.cat([feat_p, feat_n], dim=1)
else:
A_sp = A_p
A_sn = A_n
A_tp = A_pt
A_tn = A_nt
x_sp = x_p
x_sn = x_n
feat_sp = self._w_sp[0] * x_sp
feat_sn = torch.zeros_like(feat_sp)
feat_tp = self._w_tp[0] * x_pt
feat_tn = torch.zeros_like(feat_tp)
curr_sp = x_sp.clone()
curr_sn_aux = x_sn.clone()
curr_tp = x_pt.clone()
curr_tn_aux = x_nt.clone()
j = 0
for h in range(0, self._hop_p):
if h > 0:
curr_sp = torch.matmul(A_sp, curr_sp)
curr_sn_aux = torch.matmul(A_sp, curr_sn_aux)
curr_tp = torch.matmul(A_tp, curr_tp)
curr_tn_aux = torch.matmul(A_tp, curr_tn_aux)
feat_sp += self._w_sp[h] * curr_sp
feat_tp += self._w_tp[h] * curr_tp
if h != self._hop_p - 1:
curr_sn = torch.matmul(A_sn, curr_sn_aux)
curr_tn = torch.matmul(A_tn, curr_tn_aux)
feat_sn += self._w_sn[j] * curr_sn
feat_tn += self._w_tn[j] * curr_tn
j += 1
for _ in range(self._hop_p - 2 - h):
curr_sn = torch.matmul(A_sp, curr_sn)
curr_tn = torch.matmul(A_tp, curr_tn)
feat_sn += self._w_sn[j] * curr_sn
feat_tn += self._w_tn[j] * curr_tn
j += 1
feat = torch.cat([feat_sp, feat_sn, feat_tp, feat_tn], dim=1)
return feat
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hop': 4}]
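# Editor's sketch (not part of the source repo): a minimal eager run of the undirected
# branch that the compiled call() above specializes. hop=4 gives w_p of shape (5, 1) and
# w_n of shape (10, 1), matching the assert_size_stride checks on primals_1 and primals_5.
def _example_simpa():
    model = SIMPA(hop=4)
    A_p, A_n, x_p, x_n = get_inputs()
    feat = model(A_p, A_n, x_p, x_n)
    assert feat.shape == (4, 8, 4, 4)  # feat_p and feat_n concatenated on dim 1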
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 64
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr0 + 1)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp9 = tl.load(in_ptr0 + 2)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp11 = tl.load(in_ptr3 + x0, xmask)
tmp14 = tl.load(in_ptr0 + 3)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp16 = tl.load(in_ptr4 + x0, xmask)
tmp19 = tl.load(in_ptr0 + 4)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp21 = tl.load(in_ptr5 + x0, xmask)
tmp24 = tl.load(in_ptr0 + 5)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK])
tmp26 = tl.load(in_ptr6 + x0, xmask)
tmp29 = tl.load(in_ptr0 + 6)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK])
tmp31 = tl.load(in_ptr7 + x0, xmask)
tmp34 = tl.load(in_ptr0 + 7)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp36 = tl.load(in_ptr8 + x0, xmask)
tmp39 = tl.load(in_ptr0 + 8)
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp41 = tl.load(in_ptr9 + x0, xmask)
tmp44 = tl.load(in_ptr0 + 9)
tmp45 = tl.broadcast_to(tmp44, [XBLOCK])
tmp46 = tl.load(in_ptr10 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp22 = tmp20 * tmp21
tmp23 = tmp18 + tmp22
tmp27 = tmp25 * tmp26
tmp28 = tmp23 + tmp27
tmp32 = tmp30 * tmp31
tmp33 = tmp28 + tmp32
tmp37 = tmp35 * tmp36
tmp38 = tmp33 + tmp37
tmp42 = tmp40 * tmp41
tmp43 = tmp38 + tmp42
tmp47 = tmp45 * tmp46
tmp48 = tmp43 + tmp47
tl.store(out_ptr0 + (x1 + 128 * x2), tmp48, xmask)
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr0 + 1)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp6 = tl.load(in_ptr2 + x2, xmask)
tmp9 = tl.load(in_ptr0 + 2)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp11 = tl.load(in_ptr3 + x2, xmask)
tmp14 = tl.load(in_ptr0 + 3)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp16 = tl.load(in_ptr4 + x2, xmask)
tmp19 = tl.load(in_ptr0 + 4)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp21 = tl.load(in_ptr5 + x2, xmask)
tmp3 = tmp1 * tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp22 = tmp20 * tmp21
tmp23 = tmp18 + tmp22
tl.store(out_ptr0 + (x0 + 128 * x1), tmp23, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (5, 1), (1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (10, 1), (1, 1))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_3, (16, 4, 4), (16, 4, 1), 0
), out=buf0)
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf0, out=buf1)
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf1, out=buf2)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf2, out=buf3)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0
), out=buf4)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_3, (16, 4, 4), (16, 4, 1), 0
), out=buf5)
del primals_3
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), buf5, out=buf6)
buf11 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf5, out=buf11)
buf12 = buf5
del buf5
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), buf11, out=buf12)
buf13 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf12, out=buf13)
buf16 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf11, out=buf16)
buf17 = buf11
del buf11
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), buf16, out=buf17)
del primals_4
buf8 = buf16
del buf16
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf6, out=buf8)
buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf8, out=buf9)
buf21 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32
)
buf20 = reinterpret_tensor(buf21, (4, 4, 4, 4), (128, 16, 4, 1), 64)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_5, buf0, buf1, buf2,
buf3, buf6, buf8, buf9, buf12, buf13, buf17, buf20, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_5
buf10 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf4, out=buf10)
buf15 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf10, out=buf15)
buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf15, out=buf18)
del primals_6
buf19 = reinterpret_tensor(buf21, (4, 4, 4, 4), (128, 16, 4, 1), 0)
triton_poi_fused_add_mul_1[grid(256)](primals_1, primals_2, buf4,
buf10, buf15, buf18, buf19, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_1
return (buf21, primals_2, buf0, buf1, buf2, buf3, buf4, buf6, buf8,
buf9, buf10, buf12, buf13, buf15, buf17, buf18)
class SIMPANew(nn.Module):
"""The signed mixed-path aggregation model.
Args:
hop (int): Number of hops to consider.
directed (bool, optional): Whether the input network is directed or not. (default: :obj:`False`)
"""
def __init__(self, hop: 'int', directed: 'bool'=False):
super(SIMPANew, self).__init__()
self._hop_p = hop + 1
self._hop_n = int((1 + hop) * hop / 2)
self._undirected = not directed
if self._undirected:
self._w_p = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_n = Parameter(torch.FloatTensor(self._hop_n, 1))
self._reset_parameters_undirected()
else:
self._w_sp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_sn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._w_tp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_tn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._reset_parameters_directed()
def _reset_parameters_undirected(self):
self._w_p.data.fill_(1.0)
self._w_n.data.fill_(1.0)
def _reset_parameters_directed(self):
self._w_sp.data.fill_(1.0)
self._w_sn.data.fill_(1.0)
self._w_tp.data.fill_(1.0)
self._w_tn.data.fill_(1.0)
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self._w_p
primals_5 = self._w_n
primals_2 = input_0
primals_3 = input_1
primals_4 = input_2
primals_6 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
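# Editor's note (hypothetical usage, requires a CUDA device): SIMPANew replaces the Python
# hop loop with the compiled call() above. Judging by the bmm source-node comments in the
# scheduled version (curr_n = A_n @ x_n maps to primals_4 @ primals_3, curr_p to
# primals_6 @ primals_2), the positional inputs correspond to input_0 = x_p,
# input_1 = x_n, input_2 = A_n, input_3 = A_p.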
| SherylHYX/SSSNET_Signed_Clustering | SIMPA | false | 17,918 | [
"MIT"
] | 5 | 85736c18e86b396d64177d22b8c7f9859dfd794c | https://github.com/SherylHYX/SSSNET_Signed_Clustering/tree/85736c18e86b396d64177d22b8c7f9859dfd794c |
SparseConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ii/cii4zjyfodp22hyu2dar7qf65pu2eguehffaedzjbwqjcolfxjo7.py
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.sort]
# Source node to ATen node mapping:
# w => sort
# Graph fragment:
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%view, 1), kwargs = {})
triton_per_fused_sort_0 = async_compile.triton('triton_per_fused_sort_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i16', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sort_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sort_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0)
tmp1 = tl_math.abs(tmp0)
tmp2 = r1
tmp3 = tmp2.to(tl.int16)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6, tmp7, = triton_helpers.sort_with_index(tmp4, tmp5, None, 1, stable=False, descending=False)
tl.store(out_ptr0 + (r1 + (4*x0)), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/pn/cpnxvehebdngucnsxf2x2oyexxciqnbcl5oiiuxlr7mxglbycxzw.py
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.ones, aten.scatter]
# Source node to ATen node mapping:
# w => full_default, scatter
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([64, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %scatter : [num_users=1] = call_function[target=torch.ops.aten.scatter.value](args = (%full_default, 1, %slice_2, 0), kwargs = {})
triton_poi_fused_ones_scatter_1 = async_compile.triton('triton_poi_fused_ones_scatter_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ones_scatter_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_ones_scatter_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 1.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/oq/coqjpmu7hs6q7gofetkbgkyp3r62xkktkzxi6vus5hes5oswju3e.py
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.ones, aten.scatter]
# Source node to ATen node mapping:
# w => full_default, scatter
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([64, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %scatter : [num_users=1] = call_function[target=torch.ops.aten.scatter.value](args = (%full_default, 1, %slice_2, 0), kwargs = {})
triton_poi_fused_ones_scatter_2 = async_compile.triton('triton_poi_fused_ones_scatter_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*i16', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ones_scatter_2', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_ones_scatter_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask)
tmp1 = tmp0.to(tl.int64)
tl.device_assert(((0 <= tmp1) & (tmp1 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp1 < 4")
tmp3 = 0.0
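    # tmp1 is the sorted position of one of the M-N = 2 smallest |w| in this group of 4;
    # writing 0.0 there turns the all-ones buffer into the 2:4 sparsity mask.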
tl.store(out_ptr0 + (tmp1 + (4*x1)), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/io/ciog7eausg5dh3ul73nafgn5h7u4ljdnkmllfdxyszvulb5puvwj.py
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.mul, aten.rsub]
# Source node to ATen node mapping:
# w => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %view_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 0.0002), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_1), kwargs = {})
triton_poi_fused_mul_rsub_3 = async_compile.triton('triton_poi_fused_mul_rsub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_rsub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_rsub_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp1
tmp5 = 0.0002
tmp6 = tmp4 * tmp5
tmp7 = tmp6 * tmp0
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/oo/coof7rew5kkdnoyby7tsaw5hjqkw6lw4xv7gw5mq3lhatfsgfetu.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %mul, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.int16)
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.sort]
stream0 = get_raw_stream(0)
triton_per_fused_sort_0.run(primals_1, buf1, 64, 4, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.ones, aten.scatter]
triton_poi_fused_ones_scatter_1.run(buf2, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.ones, aten.scatter]
triton_poi_fused_ones_scatter_2.run(buf1, buf2, 128, grid=grid(128), stream=stream0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.mul, aten.rsub]
triton_poi_fused_mul_rsub_3.run(primals_1, buf2, buf4, buf7, 256, grid=grid(256), stream=stream0)
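        # buf4 = weight * mask (the pruned weight fed to the convolution below); buf7 =
        # 0.0002 * (1 - mask) * weight, the SR-STE decay term, precomputed here and saved
        # for the backward pass.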
del buf2
del primals_1
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_3, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
triton_poi_fused_convolution_4.run(buf6, primals_2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf6, primals_3, buf4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
class Sparse(autograd.Function):
    """
    Prune the unimportant weights in the forward pass,
    but pass the gradient to the dense weights using SR-STE in the backward pass.
    """
@staticmethod
def forward(ctx, weight, N, M, decay=0.0002):
ctx.save_for_backward(weight)
output = weight.clone()
length = weight.numel()
group = int(length / M)
weight_temp = weight.detach().abs().reshape(group, M)
index = torch.argsort(weight_temp, dim=1)[:, :int(M - N)]
w_b = torch.ones(weight_temp.shape, device=weight_temp.device)
w_b = w_b.scatter_(dim=1, index=index, value=0).reshape(weight.shape)
ctx.mask = w_b
ctx.decay = decay
return output * w_b
@staticmethod
def backward(ctx, grad_output):
weight, = ctx.saved_tensors
return grad_output + ctx.decay * (1 - ctx.mask) * weight, None, None
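# SR-STE in one line: grad_W = grad_out + decay * (1 - mask) * W — a straight-through
# estimator plus a decay term that only shrinks the pruned (mask == 0) weights, so the
# dense weight keeps receiving gradient signal.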
class SparseConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', N
=2, M=4, **kwargs):
self.N = N
self.M = M
super(SparseConv2d, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, groups, bias,
padding_mode, **kwargs)
def get_sparse_weights(self):
return Sparse.apply(self.weight, self.N, self.M)
def forward(self, x):
w = self.get_sparse_weights()
x = F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
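# Editor's sketch (not part of the source repo): the eager equivalent of what
# triton_per_fused_sort_0 and triton_poi_fused_ones_scatter_{1,2} compute for the
# default N=2, M=4 and the (4, 4, 4, 4) weight above.
def _example_nm_mask():
    w = torch.rand(4, 4, 4, 4)
    groups = w.detach().abs().reshape(-1, 4)              # 64 groups of M=4 weights
    idx = torch.argsort(groups, dim=1)[:, :2]             # indices of the M-N smallest |w|
    mask = torch.ones_like(groups).scatter_(1, idx, 0.0)  # zero the pruned slots
    assert mask.sum(dim=1).eq(2.0).all()                  # exactly N=2 survivors per group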
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch import autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sort_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl_math.abs(tmp0)
tmp2 = r1
tmp3 = tmp2.to(tl.int16)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
_tmp6, tmp7 = triton_helpers.sort_with_index(tmp4, tmp5, None, 1,
stable=False, descending=False)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp7, xmask)
@triton.jit
def triton_poi_fused_ones_scatter_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 1.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_ones_scatter_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tmp0.to(tl.int64)
tl.device_assert((0 <= tmp1) & (tmp1 < 4) | ~xmask,
'index out of bounds: 0 <= tmp1 < 4')
tmp3 = 0.0
tl.store(out_ptr0 + (tmp1 + 4 * x1), tmp3, xmask)
@triton.jit
def triton_poi_fused_mul_rsub_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp1
tmp5 = 0.0002
tmp6 = tmp4 * tmp5
tmp7 = tmp6 * tmp0
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.int16)
get_raw_stream(0)
triton_per_fused_sort_0[grid(64)](primals_1, buf1, 64, 4, XBLOCK=32,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_ones_scatter_1[grid(256)](buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
triton_poi_fused_ones_scatter_2[grid(128)](buf1, buf2, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_rsub_3[grid(256)](primals_1, buf2, buf4, buf7,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
del primals_1
buf5 = extern_kernels.convolution(primals_3, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_4[grid(16)](buf6, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf6, primals_3, buf4, buf7
class Sparse(autograd.Function):
    """
    Prune the unimportant weights in the forward pass,
    but pass the gradient to the dense weights using SR-STE in the backward pass.
    """
@staticmethod
def forward(ctx, weight, N, M, decay=0.0002):
ctx.save_for_backward(weight)
output = weight.clone()
length = weight.numel()
group = int(length / M)
weight_temp = weight.detach().abs().reshape(group, M)
index = torch.argsort(weight_temp, dim=1)[:, :int(M - N)]
w_b = torch.ones(weight_temp.shape, device=weight_temp.device)
w_b = w_b.scatter_(dim=1, index=index, value=0).reshape(weight.shape)
ctx.mask = w_b
ctx.decay = decay
return output * w_b
@staticmethod
def backward(ctx, grad_output):
weight, = ctx.saved_tensors
return grad_output + ctx.decay * (1 - ctx.mask) * weight, None, None
class SparseConv2dNew(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', N
=2, M=4, **kwargs):
self.N = N
self.M = M
super(SparseConv2dNew, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, groups, bias,
padding_mode, **kwargs)
def get_sparse_weights(self):
return Sparse.apply(self.weight, self.N, self.M)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Sense-GVT/BigPretrain | SparseConv2d | false | 17,920 | [
"Apache-2.0"
] | 8 | d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e | https://github.com/Sense-GVT/BigPretrain/tree/d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e |
GCNConv_diag | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/p7/cp7bcgmypisbzybkq2wwegzxvgofvozth3exh24yu77qhzvvgn2e.py
# Topologically Sorted Source Nodes: [diag], Original ATen: [aten.diag_embed]
# Source node to ATen node mapping:
# diag => eq, full_default, iota, where
# Graph fragment:
# %iota : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota, %unsqueeze_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %permute, %full_default), kwargs = {})
triton_poi_fused_diag_embed_0 = async_compile.triton('triton_poi_fused_diag_embed_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_diag_embed_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_diag_embed_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [diag], Original ATen: [aten.diag_embed]
stream0 = get_raw_stream(0)
triton_poi_fused_diag_embed_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
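        # buf0 now materializes torch.diag(self.W): W[x0] where x0 == x1, zeros elsewhere.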
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [diag, hidden], Original ATen: [aten.diag_embed, aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, out=buf1)
del buf0
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2)
del buf1
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from sklearn.metrics.pairwise import *
from torch.optim.lr_scheduler import *
class GCNConv_diag(torch.nn.Module):
"""
    A GCN convolution layer using diagonal weight matrix multiplication
"""
def __init__(self, input_size, device):
super(GCNConv_diag, self).__init__()
self.W = torch.nn.Parameter(torch.ones(input_size))
self.input_size = input_size
def init_para(self):
self.W = torch.nn.Parameter(torch.ones(self.input_size))
def forward(self, input, A, sparse=False):
hidden = input @ torch.diag(self.W)
if sparse:
output = torch.sparse.mm(A, hidden)
else:
output = torch.matmul(A, hidden)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'device': 0}]
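# Editor's note: for a 1-D W, input @ torch.diag(W) is just an elementwise scale of the
# last feature dimension, so the layer computes A @ (input * W). A quick equivalence check:
def _example_diag_equivalence():
    layer = GCNConv_diag(4, device=0)
    with torch.no_grad():
        layer.W.copy_(torch.rand(4))
    x, A = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    assert torch.allclose(layer(x, A), torch.matmul(A, x * layer.W), atol=1e-6)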
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from sklearn.metrics.pairwise import *
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_diag_embed_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_diag_embed_0[grid(16)](primals_1, buf0, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
buf0, out=buf1)
del buf0
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_3, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0),
out=buf2)
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)
class GCNConv_diagNew(torch.nn.Module):
"""
    A GCN convolution layer using diagonal weight matrix multiplication
"""
def __init__(self, input_size, device):
super(GCNConv_diagNew, self).__init__()
self.W = torch.nn.Parameter(torch.ones(input_size))
self.input_size = input_size
def init_para(self):
self.W = torch.nn.Parameter(torch.ones(self.input_size))
def forward(self, input_0, input_1):
primals_1 = self.W
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| STK101/GRCN | GCNConv_diag | false | 17,921 | [
"MIT"
] | 4 | 7389000a13d5969bcc77dc4cf73a4107acc68403 | https://github.com/STK101/GRCN/tree/7389000a13d5969bcc77dc4cf73a4107acc68403 |
Balance_Theory | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/wv/cwvwn4skuzp6hvqejtyxfz5qh5bqj7bnmxjbe4yznbquqqgow5kx.py
# Topologically Sorted Source Nodes: [x_p_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_p_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mm,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/qg/cqg5nly2dcrzjkouuwxdhbgjazd2jwbjc7xci3etispat7w7hkdt.py
# Topologically Sorted Source Nodes: [feat, normalize], Original ATen: [aten.cat, aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# feat => cat
# normalize => div_1, pow_1, pow_2, sum_3
# Graph fragment:
# %cat : [num_users=4] = call_function[target=torch.ops.aten.cat.default](args = ([%add_5, %add_3], 1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%cat, 2.0), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%cat, %expand), kwargs = {})
triton_per_fused_cat_div_linalg_vector_norm_1 = async_compile.triton('triton_per_fused_cat_div_linalg_vector_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 8],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: 'i32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_div_linalg_vector_norm_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 14, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cat_div_linalg_vector_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 8
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp5 = tl.load(in_ptr0 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp9 = tl.load(in_ptr0 + (1))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp14 = tl.load(in_ptr0 + (2))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp19 = tl.load(in_ptr0 + (3))
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp29 = tl.load(in_ptr5 + (0))
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp33 = tl.load(in_ptr5 + (1))
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp38 = tl.load(in_ptr5 + (2))
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp0 = r1
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = tl.load(in_ptr1 + ((4*x0) + r1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 * tmp7
tmp11 = tl.load(in_ptr2 + ((4*x0) + r1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tl.load(in_ptr3 + ((4*x0) + r1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp21 = tl.load(in_ptr4 + ((4*x0) + r1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 * tmp21
tmp23 = tmp18 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp4, tmp23, tmp24)
tmp26 = tmp0 >= tmp3
tmp27 = tl.full([1, 1], 8, tl.int64)
tmp28 = tmp0 < tmp27
tmp31 = tl.load(in_ptr6 + ((4*x0) + ((-4) + r1)), tmp26 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp30 * tmp31
tmp35 = tl.load(in_ptr7 + ((4*x0) + ((-4) + r1)), tmp26 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tmp34 * tmp35
tmp37 = tmp32 + tmp36
tmp40 = tl.load(in_ptr8 + ((4*x0) + ((-4) + r1)), tmp26 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp37 + tmp41
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp26, tmp42, tmp43)
tmp45 = tl.where(tmp4, tmp25, tmp44)
tmp46 = tmp45 * tmp45
tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
tmp49 = tl.where(xmask, tmp47, 0)
tmp50 = tl.sum(tmp49, 1)[:, None]
tmp51 = libdevice.sqrt(tmp50)
tmp52 = 1e-12
tmp53 = triton_helpers.maximum(tmp51, tmp52)
tmp54 = tmp45 / tmp53
tl.store(out_ptr0 + (r1 + (8*x0)), tmp45, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp51, xmask)
tl.store(out_ptr1 + (r1 + (8*x0)), tmp54, xmask)
''', device_str='cuda')
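# Descriptive note (added): this persistent reduction handles one output row
# (x0) per program. For r1 < 4 it assembles feat_p as a weighted sum of four
# hop features (weights in_ptr0 = _w_p); for r1 >= 4 it assembles feat_n from
# three hop features (weights in_ptr5 = _w_n); tl.where stitches the halves
# into the concatenated 8-wide row. The row is then reduced to its L2 norm,
# clamped at 1e-12 as in F.normalize, and the kernel stores the raw row
# (out_ptr0), the norm (in_out_ptr0), and the normalized row (out_ptr1).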
# kernel path: runs/run_shard_2/inductor_cache/ox/coxsrblmbh4clbj6u7l2y3kwzachaknerspxlgcpfe75zzvq5tsv.py
# Topologically Sorted Source Nodes: [predictions_cluster], Original ATen: [aten.argmax]
# Source node to ATen node mapping:
# predictions_cluster => argmax
# Graph fragment:
# %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%addmm_default, 1), kwargs = {})
triton_poi_fused_argmax_2 = async_compile.triton('triton_poi_fused_argmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_argmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_argmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + (x0), tmp46, xmask)
''', device_str='cuda')
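# Descriptive note (added): the kernel above is a branch-free argmax over the
# four logits of each row. Each pairwise step carries a running (value, index)
# pair forward; the `x != x` comparisons detect NaNs so that, as in
# torch.argmax, a NaN beats any finite value, and ties resolve to the smaller
# index via the index-comparison guards.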
# kernel path: runs/run_shard_2/inductor_cache/qk/cqklaukls6mb7pxyhr4fpn4vpxnigocf7k3liofdy6zlzjgsmar7.py
# Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# prob => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_default, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_default, %amax), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/fn/cfnsfxhsfne5ixwt6qtyl2yls4km74n7aitolg6zz3iitjyjfblc.py
# Topologically Sorted Source Nodes: [prob, output_2], Original ATen: [aten._softmax, aten._log_softmax]
# Source node to ATen node mapping:
# output_2 => log, sub_2
# prob => div, exp, sum_1
# Graph fragment:
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax__softmax_4 = async_compile.triton('triton_poi_fused__log_softmax__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp3 + tmp5
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tmp1 / tmp12
tmp14 = tl_math.log(tmp12)
tmp15 = tmp0 - tmp14
tl.store(out_ptr0 + (x2), tmp13, xmask)
tl.store(out_ptr1 + (x2), tmp15, xmask)
''', device_str='cuda')
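# Descriptive note (added): the two kernels above implement the numerically
# stable pair
#     softmax(x)     = exp(x - max(x)) / sum(exp(x - max(x)))
#     log_softmax(x) = (x - max(x)) - log(sum(exp(x - max(x))))
# Kernel 3 only subtracts the per-row max; kernel 4 re-reads the shifted row,
# forms the four exponentials once, and emits both the probabilities and the
# log-probabilities from the same intermediates.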
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (3, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_p], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_p_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_p_3], Original ATen: [aten.mm]
extern_kernels.mm(buf1, primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_n], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_4, out=buf3)
del primals_4
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_n_1], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf4, 16, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_n_3], Original ATen: [aten.mm]
extern_kernels.mm(buf4, primals_5, out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n], Original ATen: [aten.mm]
extern_kernels.mm(primals_7, buf5, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf6, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_p_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf2, out=buf8)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_aux_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf5, out=buf9)
buf10 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [curr_n_2], Original ATen: [aten.mm]
extern_kernels.mm(primals_7, buf9, out=buf10)
buf11 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [curr_p_2], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf8, out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.mm]
extern_kernels.mm(primals_7, buf2, out=buf12)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.mm]
extern_kernels.mm(primals_7, buf12, out=buf13)
buf14 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf20 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf21 = reinterpret_tensor(buf20, (4, 1), (1, 1), 0); del buf20 # reuse
buf22 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [feat, normalize], Original ATen: [aten.cat, aten.linalg_vector_norm, aten.div]
triton_per_fused_cat_div_linalg_vector_norm_1.run(buf21, primals_6, buf2, buf8, buf11, buf13, primals_8, buf6, buf7, buf10, buf14, buf22, 4, 8, grid=grid(4), stream=stream0)
buf15 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.addmm(primals_11, buf14, primals_10, alpha=1, beta=1, out=buf15)
del primals_11
buf16 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [predictions_cluster], Original ATen: [aten.argmax]
triton_poi_fused_argmax_2.run(buf15, buf16, 4, grid=grid(4), stream=stream0)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf15, buf17, 16, grid=grid(16), stream=stream0)
buf18 = buf15; del buf15 # reuse
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [prob, output_2], Original ATen: [aten._softmax, aten._log_softmax]
triton_poi_fused__log_softmax__softmax_4.run(buf17, buf18, buf19, 16, grid=grid(16), stream=stream0)
del buf17
return (buf22, buf19, buf16, buf18, primals_6, primals_8, buf1, buf2, buf4, buf6, buf7, buf8, buf10, buf11, buf13, buf14, buf18, buf19, buf21, reinterpret_tensor(primals_10, (4, 8), (1, 4), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((3, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Optional
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from typing import Union
class Balance_Theory(nn.Module):
"""The signed graph clustering model with balance theory, restricted to 2 hops for fair comparison with SSSNET.
Args:
nfeat (int): Number of features.
hidden (int): Hidden dimensions of the initial MLP.
nclass (int): Number of clusters.
dropout (float): Dropout probability.
        hop (int): Number of hops to consider (needs to be 2).
directed (bool, optional): Whether the input network is directed or not. (default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
"""
def __init__(self, nfeat: 'int', hidden: 'int', nclass: 'int', dropout:
'float', hop: 'int', directed: 'bool'=False, bias: 'bool'=True):
super(Balance_Theory, self).__init__()
nh1 = hidden
nh2 = hidden
self._num_clusters = int(nclass)
assert hop == 2, 'please only use 2 hops'
self._hop_p = 4
self._hop_n = 3
if bias:
self._bias = Parameter(torch.FloatTensor(self._num_clusters))
else:
self.register_parameter('_bias', None)
self._relu = nn.ReLU()
self._dropout = nn.Dropout(p=dropout)
self._undirected = not directed
if self._undirected:
self._w_p0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_p1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_n0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_n1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_p = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_n = Parameter(torch.FloatTensor(self._hop_n, 1))
self._W_prob = Parameter(torch.FloatTensor(2 * nh2, self.
_num_clusters))
self._reset_parameters_undirected()
else:
self._w_sp0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_sp1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_sn0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_sn1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_tp0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_tp1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_tn0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_tn1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_sp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_sn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._w_tp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_tn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._W_prob = Parameter(torch.FloatTensor(4 * nh2, self.
_num_clusters))
self._reset_parameters_directed()
def _reset_parameters_undirected(self):
self._w_p.data.fill_(1.0)
self._w_n.data.fill_(1.0)
nn.init.xavier_uniform_(self._w_p0, gain=1.414)
nn.init.xavier_uniform_(self._w_p1, gain=1.414)
nn.init.xavier_uniform_(self._w_n0, gain=1.414)
nn.init.xavier_uniform_(self._w_n1, gain=1.414)
if self._bias is not None:
self._bias.data.fill_(0.0)
nn.init.xavier_uniform_(self._W_prob, gain=1.414)
def _reset_parameters_directed(self):
self._w_sp.data.fill_(1.0)
self._w_sn.data.fill_(1.0)
self._w_tp.data.fill_(1.0)
self._w_tn.data.fill_(1.0)
nn.init.xavier_uniform_(self._w_sp0, gain=1.414)
nn.init.xavier_uniform_(self._w_sp1, gain=1.414)
nn.init.xavier_uniform_(self._w_sn0, gain=1.414)
nn.init.xavier_uniform_(self._w_sn1, gain=1.414)
nn.init.xavier_uniform_(self._w_tp0, gain=1.414)
nn.init.xavier_uniform_(self._w_tp1, gain=1.414)
nn.init.xavier_uniform_(self._w_tn0, gain=1.414)
nn.init.xavier_uniform_(self._w_tn1, gain=1.414)
if self._bias is not None:
self._bias.data.fill_(0.0)
nn.init.xavier_uniform_(self._W_prob, gain=1.414)
def forward(self, A_p:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', A_n:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', features:
'torch.FloatTensor', A_pt:
'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'=None,
A_nt: 'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'
=None) ->Tuple[torch.FloatTensor, torch.FloatTensor, torch.
LongTensor, torch.FloatTensor]:
"""
Making a forward pass of the signed graph clustering model with balance theory.
Arg types:
* **A_p** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized positive part of the adjacency matrix.
* **A_n** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized negative part of the adjacency matrix.
* **features** (PyTorch FloatTensor) - Input node features, with shape (num_nodes, num_features).
* **A_pt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
positive part of the adjacency matrix. Default: None.
* **A_nt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
negative part of the adjacency matrix. Default: None.
Return types:
* **z** (PyTorch FloatTensor) - Embedding matrix, with shape (num_nodes, 2*hidden) for undirected graphs
and (num_nodes, 4*hidden) for directed graphs.
* **output** (PyTorch FloatTensor) - Log of prob, with shape (num_nodes, num_clusters).
* **predictions_cluster** (PyTorch LongTensor) - Predicted labels.
* **prob** (PyTorch FloatTensor) - Probability assignment matrix of different clusters, with shape (num_nodes, num_clusters).
"""
if self._undirected:
x_p = torch.mm(features, self._w_p0)
x_p = self._relu(x_p)
x_p = self._dropout(x_p)
x_p = torch.mm(x_p, self._w_p1)
x_n = torch.mm(features, self._w_n0)
x_n = self._relu(x_n)
x_n = self._dropout(x_n)
x_n = torch.mm(x_n, self._w_n1)
feat_p = self._w_p[0] * x_p
feat_n = torch.zeros_like(feat_p)
curr_p = x_p.clone()
curr_n_aux = x_n.clone()
j = 0
for h in range(0, self._hop_p - 1):
if h > 0:
curr_p = torch.matmul(A_p, curr_p)
curr_n_aux = torch.matmul(A_p, curr_n_aux)
feat_p += self._w_p[h] * curr_p
if h != self._hop_p - 2:
curr_n = torch.matmul(A_n, curr_n_aux)
feat_n += self._w_n[j] * curr_n
j += 1
for _ in range(self._hop_p - 3 - h):
curr_n = torch.matmul(A_p, curr_n)
feat_n += self._w_n[j] * curr_n
j += 1
feat_p += self._w_p[3] * torch.matmul(A_n, torch.matmul(A_n, x_p))
feat = torch.cat([feat_p, feat_n], dim=1)
else:
x_sp = torch.mm(features, self._w_sp0)
x_sp = self._relu(x_sp)
x_sp = self._dropout(x_sp)
x_sp = torch.mm(x_sp, self._w_sp1)
x_sn = torch.mm(features, self._w_sn0)
x_sn = self._relu(x_sn)
x_sn = self._dropout(x_sn)
x_sn = torch.mm(x_sn, self._w_sn1)
x_tp = torch.mm(features, self._w_tp0)
x_tp = self._relu(x_tp)
x_tp = self._dropout(x_tp)
x_tp = torch.mm(x_tp, self._w_tp1)
x_tn = torch.mm(features, self._w_tn0)
x_tn = self._relu(x_tn)
x_tn = self._dropout(x_tn)
x_tn = torch.mm(x_tn, self._w_tn1)
A_sp = A_p
A_sn = A_n
A_tp = A_pt
A_tn = A_nt
feat_sp = self._w_sp[0] * x_sp
feat_sn = torch.zeros_like(feat_sp)
feat_tp = self._w_tp[0] * x_tp
feat_tn = torch.zeros_like(feat_tp)
curr_sp = x_sp.clone()
curr_sn_aux = x_sn.clone()
curr_tp = x_tp.clone()
curr_tn_aux = x_tn.clone()
j = 0
for h in range(0, self._hop_p - 1):
if h > 0:
curr_sp = torch.matmul(A_sp, curr_sp)
curr_sn_aux = torch.matmul(A_sp, curr_sn_aux)
curr_tp = torch.matmul(A_tp, curr_tp)
curr_tn_aux = torch.matmul(A_tp, curr_tn_aux)
feat_sp += self._w_sp[h] * curr_sp
feat_tp += self._w_tp[h] * curr_tp
if h != self._hop_p - 2:
curr_sn = torch.matmul(A_sn, curr_sn_aux)
curr_tn = torch.matmul(A_tn, curr_tn_aux)
feat_sn += self._w_sn[j] * curr_sn
feat_tn += self._w_tn[j] * curr_tn
j += 1
for _ in range(self._hop_p - 3 - h):
curr_sn = torch.matmul(A_sp, curr_sn)
curr_tn = torch.matmul(A_tp, curr_tn)
feat_sn += self._w_sn[j] * curr_sn
feat_tn += self._w_tn[j] * curr_tn
j += 1
feat_sp += self._w_sp[3] * torch.matmul(A_sn, torch.matmul(A_sn,
x_sp))
feat_tp += self._w_tp[3] * torch.matmul(A_tn, torch.matmul(A_tn,
x_tp))
feat = torch.cat([feat_sp, feat_sn, feat_tp, feat_tn], dim=1)
z = feat
output = torch.mm(z, self._W_prob)
if self._bias is not None:
output = output + self._bias
predictions_cluster = torch.argmax(output, dim=1)
prob = F.softmax(output, dim=1)
output = F.log_softmax(output, dim=1)
return F.normalize(z), output, predictions_cluster, prob
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'hidden': 4, 'nclass': 4, 'dropout': 0.5,
'hop': 2}]
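# --- hedged usage sketch (added): CPU smoke test in eval mode so dropout is
# inert; shapes follow the undirected branch of the forward pass above. ---
if __name__ == "__main__":
    torch.manual_seed(0)
    model = Balance_Theory(nfeat=4, hidden=4, nclass=4, dropout=0.5, hop=2)
    model.eval()
    A_p, A_n, features = get_inputs()
    z, log_prob, pred, prob = model(A_p, A_n, features)
    assert z.shape == (4, 8) and prob.shape == (4, 4) and pred.shape == (4,)
    assert torch.allclose(prob.sum(dim=1), torch.ones(4))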
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused_cat_div_linalg_vector_norm_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp5 = tl.load(in_ptr0 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp9 = tl.load(in_ptr0 + 1)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp14 = tl.load(in_ptr0 + 2)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp19 = tl.load(in_ptr0 + 3)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp29 = tl.load(in_ptr5 + 0)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp33 = tl.load(in_ptr5 + 1)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp38 = tl.load(in_ptr5 + 2)
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp0 = r1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = tl.load(in_ptr1 + (4 * x0 + r1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp6 * tmp7
tmp11 = tl.load(in_ptr2 + (4 * x0 + r1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tl.load(in_ptr3 + (4 * x0 + r1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp21 = tl.load(in_ptr4 + (4 * x0 + r1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp20 * tmp21
tmp23 = tmp18 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp4, tmp23, tmp24)
tmp26 = tmp0 >= tmp3
tl.full([1, 1], 8, tl.int64)
tmp31 = tl.load(in_ptr6 + (4 * x0 + (-4 + r1)), tmp26 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp30 * tmp31
tmp35 = tl.load(in_ptr7 + (4 * x0 + (-4 + r1)), tmp26 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tmp34 * tmp35
tmp37 = tmp32 + tmp36
tmp40 = tl.load(in_ptr8 + (4 * x0 + (-4 + r1)), tmp26 & xmask,
eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp37 + tmp41
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp26, tmp42, tmp43)
tmp45 = tl.where(tmp4, tmp25, tmp44)
tmp46 = tmp45 * tmp45
tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
tmp49 = tl.where(xmask, tmp47, 0)
tmp50 = tl.sum(tmp49, 1)[:, None]
tmp51 = libdevice.sqrt(tmp50)
tmp52 = 1e-12
tmp53 = triton_helpers.maximum(tmp51, tmp52)
tmp54 = tmp45 / tmp53
tl.store(out_ptr0 + (r1 + 8 * x0), tmp45, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp51, xmask)
tl.store(out_ptr1 + (r1 + 8 * x0), tmp54, xmask)
@triton.jit
def triton_poi_fused_argmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x0, tmp46, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax__softmax_4(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp3 + tmp5
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tmp1 / tmp12
tmp14 = tl_math.log(tmp12)
tmp15 = tmp0 - tmp14
tl.store(out_ptr0 + x2, tmp13, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (3, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_4, out=buf3)
del primals_4
buf4 = buf3
del buf3
triton_poi_fused_relu_0[grid(16)](buf4, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, primals_5, out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_7, buf5, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf6, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf2, out=buf8)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf5, out=buf9)
buf10 = buf5
del buf5
extern_kernels.mm(primals_7, buf9, out=buf10)
buf11 = buf9
del buf9
extern_kernels.mm(primals_9, buf8, out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_7, buf2, out=buf12)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_7, buf12, out=buf13)
buf14 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf20 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf21 = reinterpret_tensor(buf20, (4, 1), (1, 1), 0)
del buf20
buf22 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_per_fused_cat_div_linalg_vector_norm_1[grid(4)](buf21,
primals_6, buf2, buf8, buf11, buf13, primals_8, buf6, buf7,
buf10, buf14, buf22, 4, 8, XBLOCK=1, num_warps=2, num_stages=1)
buf15 = buf12
del buf12
extern_kernels.addmm(primals_11, buf14, primals_10, alpha=1, beta=1,
out=buf15)
del primals_11
buf16 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_argmax_2[grid(4)](buf15, buf16, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(16)](buf15, buf17, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf18 = buf15
del buf15
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_4[grid(16)](buf17, buf18,
buf19, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf17
return (buf22, buf19, buf16, buf18, primals_6, primals_8, buf1, buf2,
buf4, buf6, buf7, buf8, buf10, buf11, buf13, buf14, buf18, buf19,
buf21, reinterpret_tensor(primals_10, (4, 8), (1, 4), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0))
class Balance_TheoryNew(nn.Module):
"""The signed graph clustering model with balance theory, restricted to 2 hops for fair comparison with SSSNET.
Args:
nfeat (int): Number of features.
hidden (int): Hidden dimensions of the initial MLP.
nclass (int): Number of clusters.
dropout (float): Dropout probability.
        hop (int): Number of hops to consider (needs to be 2).
directed (bool, optional): Whether the input network is directed or not. (default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
"""
def __init__(self, nfeat: 'int', hidden: 'int', nclass: 'int', dropout:
'float', hop: 'int', directed: 'bool'=False, bias: 'bool'=True):
super(Balance_TheoryNew, self).__init__()
nh1 = hidden
nh2 = hidden
self._num_clusters = int(nclass)
assert hop == 2, 'please only use 2 hops'
self._hop_p = 4
self._hop_n = 3
if bias:
self._bias = Parameter(torch.FloatTensor(self._num_clusters))
else:
self.register_parameter('_bias', None)
self._relu = nn.ReLU()
self._dropout = nn.Dropout(p=dropout)
self._undirected = not directed
if self._undirected:
self._w_p0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_p1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_n0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_n1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_p = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_n = Parameter(torch.FloatTensor(self._hop_n, 1))
self._W_prob = Parameter(torch.FloatTensor(2 * nh2, self.
_num_clusters))
self._reset_parameters_undirected()
else:
self._w_sp0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_sp1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_sn0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_sn1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_tp0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_tp1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_tn0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_tn1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_sp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_sn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._w_tp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_tn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._W_prob = Parameter(torch.FloatTensor(4 * nh2, self.
_num_clusters))
self._reset_parameters_directed()
def _reset_parameters_undirected(self):
self._w_p.data.fill_(1.0)
self._w_n.data.fill_(1.0)
nn.init.xavier_uniform_(self._w_p0, gain=1.414)
nn.init.xavier_uniform_(self._w_p1, gain=1.414)
nn.init.xavier_uniform_(self._w_n0, gain=1.414)
nn.init.xavier_uniform_(self._w_n1, gain=1.414)
if self._bias is not None:
self._bias.data.fill_(0.0)
nn.init.xavier_uniform_(self._W_prob, gain=1.414)
def _reset_parameters_directed(self):
self._w_sp.data.fill_(1.0)
self._w_sn.data.fill_(1.0)
self._w_tp.data.fill_(1.0)
self._w_tn.data.fill_(1.0)
nn.init.xavier_uniform_(self._w_sp0, gain=1.414)
nn.init.xavier_uniform_(self._w_sp1, gain=1.414)
nn.init.xavier_uniform_(self._w_sn0, gain=1.414)
nn.init.xavier_uniform_(self._w_sn1, gain=1.414)
nn.init.xavier_uniform_(self._w_tp0, gain=1.414)
nn.init.xavier_uniform_(self._w_tp1, gain=1.414)
nn.init.xavier_uniform_(self._w_tn0, gain=1.414)
nn.init.xavier_uniform_(self._w_tn1, gain=1.414)
if self._bias is not None:
self._bias.data.fill_(0.0)
nn.init.xavier_uniform_(self._W_prob, gain=1.414)
def forward(self, input_0, input_1, input_2):
primals_11 = self._bias
primals_1 = self._w_p0
primals_2 = self._w_p1
primals_3 = self._w_n0
primals_4 = self._w_n1
primals_6 = self._w_p
primals_8 = self._w_n
primals_10 = self._W_prob
primals_5 = input_0
primals_7 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1], output[2], output[3]
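# --- hedged shape sketch (added; assumes a CUDA device). The compiled graph
# was traced for 4x4 float32 inputs, and the auto-generated primal binding is
# opaque, so only output shapes are asserted here, not numerical parity. ---
#
#     m = Balance_TheoryNew(nfeat=4, hidden=4, nclass=4, dropout=0.5, hop=2).cuda()
#     A_p, A_n, feats = (torch.rand(4, 4, device='cuda') for _ in range(3))
#     z, log_prob, pred, prob = m(A_p, A_n, feats)
#     # z: (4, 8) L2-normalized rows; prob rows sum to 1; pred: (4,) int64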
| SherylHYX/SSSNET_Signed_Clustering | Balance_Theory | false | 17,922 | [
"MIT"
] | 5 | 85736c18e86b396d64177d22b8c7f9859dfd794c | https://github.com/SherylHYX/SSSNET_Signed_Clustering/tree/85736c18e86b396d64177d22b8c7f9859dfd794c |
LSN | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/oi/coifsl54slglgagsvc5ivgip6nlhioo5asq3u5kr5trgsswtq6fq.py
# Topologically Sorted Source Nodes: [y_relu, num, denom, add, truediv], Original ATen: [aten.relu, aten.mul, aten.sum, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# denom => sum_1
# num => mul
# truediv => div
# y_relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, 20000), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%relu,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {})
triton_per_fused_add_div_mul_relu_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_relu_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_relu_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_relu_sum_0(in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = 20000.0
tmp7 = tmp2 * tmp6
tmp8 = 1e-08
tmp9 = tmp5 + tmp8
tmp10 = tmp7 / tmp9
tl.store(out_ptr1 + (tl.broadcast_to(r0, [RBLOCK])), tmp10, None)
''', device_str='cuda')
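# Descriptive note (added): the entire LSN forward collapses into this single
# persistent-reduction program. All 256 elements fit in one RBLOCK: the ReLU
# is applied in registers, summed across the block to form `denom`, and the
# same registers are scaled by 20000 / (denom + 1e-08) and written out, so the
# input tensor is read from global memory exactly once.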
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_relu, num, denom, add, truediv], Original ATen: [aten.relu, aten.mul, aten.sum, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_relu_sum_0.run(arg0_1, buf1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.nn.functional as F
class LSN(nn.Module):
""" Custom Linear layer that modifies standard ReLU layer"""
__constants__ = ['inplace']
inplace: 'bool'
def __init__(self, scale: 'int'=20000, inplace: 'bool'=False):
super(LSN, self).__init__()
self.inplace = inplace
self.scale = scale
def forward(self, input: 'Tensor') ->Tensor:
y_relu = F.relu(input, inplace=self.inplace)
num = y_relu * self.scale
denom = torch.sum(y_relu)
return num / (denom + 1e-08)
def extra_repr(self) ->str:
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
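# --- hedged numeric check (added): with non-negative input the ReLU is the
# identity, so the output should sum to `scale` up to the 1e-08 stabilizer. ---
if __name__ == "__main__":
    lsn = LSN(scale=20000)
    y = lsn(torch.rand(4, 4, 4, 4))
    assert abs(y.sum().item() - 20000.0) < 1.0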
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_relu_sum_0(in_ptr0, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = 20000.0
tmp7 = tmp2 * tmp6
tmp8 = 1e-08
tmp9 = tmp5 + tmp8
tmp10 = tmp7 / tmp9
tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp10, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mul_relu_sum_0[grid(1)](arg0_1, buf1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class LSNNew(nn.Module):
""" Custom Linear layer that modifies standard ReLU layer"""
__constants__ = ['inplace']
inplace: 'bool'
def __init__(self, scale: 'int'=20000, inplace: 'bool'=False):
super(LSNNew, self).__init__()
self.inplace = inplace
self.scale = scale
def extra_repr(self) ->str:
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
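# --- hedged parity sketch (added; assumes a CUDA device and a float32 input
# of exactly (4, 4, 4, 4), the shape the kernel was specialized for). Note
# that the kernel hard-codes 20000.0, so a non-default `scale` passed to
# LSNNew is silently ignored. ---
#
#     x = torch.rand(4, 4, 4, 4, device='cuda')
#     torch.testing.assert_close(LSNNew()(x), LSN()(x))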
| SindiLab/ACTIVA | LSN | false | 17,923 | [
"MIT"
] | 6 | 599f57478c5e13868d27879632c54964bf7b02ad | https://github.com/SindiLab/ACTIVA/tree/599f57478c5e13868d27879632c54964bf7b02ad |
EncoderImagePrecomp | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ve/cvejitjptoqd7b6foowew6rmkelfes5xonbfhwup3okqgfxidhay.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# X => div
# norm => sqrt
# pow_1 => pow_1
# sum_1 => sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %sqrt), kwargs = {})
triton_poi_fused_div_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_div_pow_sqrt_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [features], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, sum_1, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_pow_sqrt_sum_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
from collections import OrderedDict
import torch.nn.init
def l2norm(X):
"""L2-normalize columns of X
"""
norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()
X = torch.div(X, norm)
return X
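# Note (added annotation): the division broadcasts the (N, 1, ...) norm
# back over dim=1, so for the (4, 4, 4, 4) inputs used below each slice
# X[i, :, j, k] comes out with unit Euclidean length.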
class EncoderImagePrecomp(nn.Module):
def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False):
super(EncoderImagePrecomp, self).__init__()
self.embed_size = embed_size
self.no_imgnorm = no_imgnorm
self.use_abs = use_abs
self.fc = nn.Linear(img_dim, embed_size)
self.init_weights()
def init_weights(self):
"""Xavier initialization for the fully connected layer
"""
r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)
self.fc.weight.data.uniform_(-r, r)
self.fc.bias.data.fill_(0)
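    # The bound r = sqrt(6 / (fan_in + fan_out)) matches Glorot/Xavier
    # uniform initialization; the bias is simply zeroed.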
def forward(self, images):
"""Extract image feature vectors."""
features = self.fc(images)
if not self.no_imgnorm:
features = l2norm(features)
if self.use_abs:
features = torch.abs(features)
return features
def load_state_dict(self, state_dict):
"""Copies parameters. overwritting the default one to
accept state_dict from Full model
"""
own_state = self.state_dict()
new_state = OrderedDict()
for name, param in state_dict.items():
if name in own_state:
new_state[name] = param
super(EncoderImagePrecomp, self).load_state_dict(new_state)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'img_dim': 4, 'embed_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from torch import nn
from collections import OrderedDict
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
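    # Sketch of the fused l2norm: for the flattened (4, 4, 4, 4) tensor,
    # the four loads strided by 16 are the dim=1 neighbours sharing
    # (x0, x2); their sum of squares gives the per-column norm that tmp0
    # is divided by.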
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_pow_sqrt_sum_0[grid(256)](buf0, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
def l2norm(X):
"""L2-normalize columns of X
"""
norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()
X = torch.div(X, norm)
return X
class EncoderImagePrecompNew(nn.Module):
def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False):
super(EncoderImagePrecompNew, self).__init__()
self.embed_size = embed_size
self.no_imgnorm = no_imgnorm
self.use_abs = use_abs
self.fc = nn.Linear(img_dim, embed_size)
self.init_weights()
def init_weights(self):
"""Xavier initialization for the fully connected layer
"""
r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)
self.fc.weight.data.uniform_(-r, r)
self.fc.bias.data.fill_(0)
def load_state_dict(self, state_dict):
"""Copies parameters. overwritting the default one to
accept state_dict from Full model
"""
own_state = self.state_dict()
new_state = OrderedDict()
for name, param in state_dict.items():
if name in own_state:
new_state[name] = param
super(EncoderImagePrecompNew, self).load_state_dict(new_state)
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Shiyang-Yan/Discrete-continous-PG-for-Retrieval | EncoderImagePrecomp | false | 17,924 | [
"Apache-2.0"
] | 8 | 39fd7a81f732ae043c2ea20352a0c55b72834639 | https://github.com/Shiyang-Yan/Discrete-continous-PG-for-Retrieval/tree/39fd7a81f732ae043c2ea20352a0c55b72834639 |
SSSNET | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/wv/cwvwn4skuzp6hvqejtyxfz5qh5bqj7bnmxjbe4yznbquqqgow5kx.py
# Topologically Sorted Source Nodes: [x_p_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_p_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mm,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/b5/cb5xkg3fblhasuqxe2p5x3e4ungoxjawaps5da6qyu6fr7euwayf.py
# Topologically Sorted Source Nodes: [feat_n_1, mul_2, feat_n_2, mul_3, feat_n_3, mul_4, feat_n_4, mul_6, feat_n_5, mul_7, feat_n_6, mul_8, feat_n_7, mul_10, feat_n_8, mul_11, feat_n_9, mul_13, feat_n_10], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
# feat_n_1 => mul_1
# feat_n_10 => add_12
# feat_n_2 => add_1
# feat_n_3 => add_2
# feat_n_4 => add_3
# feat_n_5 => add_5
# feat_n_6 => add_6
# feat_n_7 => add_7
# feat_n_8 => add_9
# feat_n_9 => add_10
# mul_10 => mul_10
# mul_11 => mul_11
# mul_13 => mul_13
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, %mm_4), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, %mm_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, %mm_6), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, %mm_7), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_6, %mm_10), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %mul_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_7, %mm_11), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %mul_7), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, %mm_12), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_8), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_10, %mm_15), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mul_10), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_11, %mm_16), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, %mul_11), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_13, %mm_19), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %mul_13), kwargs = {})
triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 4
x2 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr0 + (1))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp6 = tl.load(in_ptr2 + (x0), xmask)
tmp9 = tl.load(in_ptr0 + (2))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp11 = tl.load(in_ptr3 + (x0), xmask)
tmp14 = tl.load(in_ptr0 + (3))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp16 = tl.load(in_ptr4 + (x0), xmask)
tmp19 = tl.load(in_ptr0 + (4))
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp21 = tl.load(in_ptr5 + (x0), xmask)
tmp24 = tl.load(in_ptr0 + (5))
tmp25 = tl.broadcast_to(tmp24, [XBLOCK])
tmp26 = tl.load(in_ptr6 + (x0), xmask)
tmp29 = tl.load(in_ptr0 + (6))
tmp30 = tl.broadcast_to(tmp29, [XBLOCK])
tmp31 = tl.load(in_ptr7 + (x0), xmask)
tmp34 = tl.load(in_ptr0 + (7))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp36 = tl.load(in_ptr8 + (x0), xmask)
tmp39 = tl.load(in_ptr0 + (8))
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp41 = tl.load(in_ptr9 + (x0), xmask)
tmp44 = tl.load(in_ptr0 + (9))
tmp45 = tl.broadcast_to(tmp44, [XBLOCK])
tmp46 = tl.load(in_ptr10 + (x0), xmask)
tmp3 = tmp1 * tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp22 = tmp20 * tmp21
tmp23 = tmp18 + tmp22
tmp27 = tmp25 * tmp26
tmp28 = tmp23 + tmp27
tmp32 = tmp30 * tmp31
tmp33 = tmp28 + tmp32
tmp37 = tmp35 * tmp36
tmp38 = tmp33 + tmp37
tmp42 = tmp40 * tmp41
tmp43 = tmp38 + tmp42
tmp47 = tmp45 * tmp46
tmp48 = tmp43 + tmp47
tl.store(out_ptr0 + (x1 + (8*x2)), tmp48, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ev/cevfemdzv5xy5vlnpwcfmwfdx2k2cg4tzdbeemd3bmosu5h2446g.py
# Topologically Sorted Source Nodes: [feat_p, mul_5, feat_p_1, mul_9, feat_p_2, mul_12, feat_p_3, mul_14, feat_p_4], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# feat_p => mul
# feat_p_1 => add_4
# feat_p_2 => add_8
# feat_p_3 => add_11
# feat_p_4 => add_13
# mul_12 => mul_12
# mul_14 => mul_14
# mul_5 => mul_5
# mul_9 => mul_9
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %mm_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_5, %mm_8), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_5), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_9, %mm_13), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_9), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_12, %mm_17), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %mul_12), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_14, %mm_20), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %mul_14), kwargs = {})
triton_poi_fused_add_mul_2 = async_compile.triton('triton_poi_fused_add_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr0 + (1))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp6 = tl.load(in_ptr2 + (x2), xmask)
tmp9 = tl.load(in_ptr0 + (2))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp11 = tl.load(in_ptr3 + (x2), xmask)
tmp14 = tl.load(in_ptr0 + (3))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp16 = tl.load(in_ptr4 + (x2), xmask)
tmp19 = tl.load(in_ptr0 + (4))
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp21 = tl.load(in_ptr5 + (x2), xmask)
tmp3 = tmp1 * tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp22 = tmp20 * tmp21
tmp23 = tmp18 + tmp22
tl.store(out_ptr0 + (x0 + (8*x1)), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/52/c52gkvexompyockyalixhu3hi3h3ztepks4yjua4f2xil5rgzgsq.py
# Topologically Sorted Source Nodes: [predictions_cluster], Original ATen: [aten.argmax]
# Source node to ATen node mapping:
# predictions_cluster => argmax
# Graph fragment:
# %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%addmm_default, 1), kwargs = {})
triton_poi_fused_argmax_3 = async_compile.triton('triton_poi_fused_argmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_argmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_argmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
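    # Row-wise argmax over 4 logits: each pairwise step keeps a running
    # max/index pair, and the x != x self-comparisons reproduce
    # torch.argmax's NaN handling (a NaN entry beats any finite value).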
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + (x0), tmp46, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/h3/ch37q6doomo54ns6njzgluqhbadilwbwfjfs4tmgeydddzjc2yjz.py
# Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# prob => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_default, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_default, %amax), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
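    # Stability pass: subtract the row max before exponentiation; the
    # exp/normalize step happens in the follow-up fused kernel.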
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ly/clynxmnz7vtftcspr4wp7wxolezilu4wo67h3pvumjqsxis2mip7.py
# Topologically Sorted Source Nodes: [prob, output_2], Original ATen: [aten._softmax, aten._log_softmax]
# Source node to ATen node mapping:
# output_2 => log, sub_2
# prob => div, exp, sum_1
# Graph fragment:
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax__softmax_5 = async_compile.triton('triton_poi_fused__log_softmax__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
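    # One row-wise exp-sum serves both outputs: tmp13 is the softmax
    # probability and tmp15 = x - log(sum(exp(x))) the log-softmax, with
    # inputs already max-shifted by the previous kernel.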
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp3 + tmp5
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tmp1 / tmp12
tmp14 = tl_math.log(tmp12)
tmp15 = tmp0 - tmp14
tl.store(out_ptr0 + (x2), tmp13, xmask)
tl.store(out_ptr1 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/lg/clgcbksjhrfwko45nuvh7lepfz5cbqsqgutdydacgs4kdfohn7om.py
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# normalize => div_1, pow_1, pow_2, sum_3
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%cat, 2.0), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%cat, %expand), kwargs = {})
triton_per_fused_div_linalg_vector_norm_6 = async_compile.triton('triton_per_fused_div_linalg_vector_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 8],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_linalg_vector_norm_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_6(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 8
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
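    # Per-row L2 norm over the 8 concatenated features; the norm is
    # clamped at 1e-12 (F.normalize's eps) before dividing, and the raw
    # norm is also written back via in_out_ptr0.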
tmp0 = tl.load(in_ptr0 + (r1 + (8*x0)), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = 1e-12
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (8*x0)), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (5, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (10, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_p], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_p_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_p_3], Original ATen: [aten.mm]
extern_kernels.mm(buf1, primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_n], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_4, out=buf3)
del primals_4
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_n_1], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf4, 16, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_n_3], Original ATen: [aten.mm]
extern_kernels.mm(buf4, primals_5, out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n], Original ATen: [aten.mm]
extern_kernels.mm(primals_7, buf5, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf6, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_2], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf7, out=buf8)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_3], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf8, out=buf9)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_p_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf2, out=buf10)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_aux_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf5, out=buf11)
buf12 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [curr_n_4], Original ATen: [aten.mm]
extern_kernels.mm(primals_7, buf11, out=buf12)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_5], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf12, out=buf14)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_6], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf14, out=buf15)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_aux_2], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf11, out=buf17)
buf18 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [curr_n_7], Original ATen: [aten.mm]
extern_kernels.mm(primals_7, buf17, out=buf18)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_8], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf18, out=buf19)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_n_aux_3], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf17, out=buf22)
buf23 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [curr_n_9], Original ATen: [aten.mm]
extern_kernels.mm(primals_7, buf22, out=buf23)
buf27 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf26 = reinterpret_tensor(buf27, (4, 4), (8, 1), 4) # alias
# Topologically Sorted Source Nodes: [feat_n_1, mul_2, feat_n_2, mul_3, feat_n_3, mul_4, feat_n_4, mul_6, feat_n_5, mul_7, feat_n_6, mul_8, feat_n_7, mul_10, feat_n_8, mul_11, feat_n_9, mul_13, feat_n_10], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_1.run(primals_8, buf6, buf7, buf8, buf9, buf12, buf14, buf15, buf18, buf19, buf23, buf26, 16, grid=grid(16), stream=stream0)
buf16 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [curr_p_2], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf10, out=buf16)
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_p_3], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf16, out=buf21)
buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [curr_p_4], Original ATen: [aten.mm]
extern_kernels.mm(primals_9, buf21, out=buf24)
buf25 = reinterpret_tensor(buf27, (4, 4), (8, 1), 0) # alias
# Topologically Sorted Source Nodes: [feat_p, mul_5, feat_p_1, mul_9, feat_p_2, mul_12, feat_p_3, mul_14, feat_p_4], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_2.run(primals_6, buf2, buf10, buf16, buf21, buf24, buf25, 16, grid=grid(16), stream=stream0)
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.addmm(primals_11, buf27, primals_10, alpha=1, beta=1, out=buf28)
del primals_11
buf29 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [predictions_cluster], Original ATen: [aten.argmax]
triton_poi_fused_argmax_3.run(buf28, buf29, 4, grid=grid(4), stream=stream0)
buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf28, buf30, 16, grid=grid(16), stream=stream0)
buf31 = buf28; del buf28 # reuse
buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [prob, output_2], Original ATen: [aten._softmax, aten._log_softmax]
triton_poi_fused__log_softmax__softmax_5.run(buf30, buf31, buf32, 16, grid=grid(16), stream=stream0)
del buf30
buf33 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf34 = reinterpret_tensor(buf33, (4, 1), (1, 1), 0); del buf33 # reuse
buf35 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.linalg_vector_norm, aten.div]
triton_per_fused_div_linalg_vector_norm_6.run(buf34, buf27, buf35, 4, 8, grid=grid(4), stream=stream0)
return (buf35, buf32, buf29, buf31, buf1, buf2, buf4, reinterpret_tensor(primals_6, (1, ), (1, ), 0), buf6, reinterpret_tensor(primals_8, (1, ), (1, ), 0), buf7, reinterpret_tensor(primals_8, (1, ), (1, ), 1), buf8, reinterpret_tensor(primals_8, (1, ), (1, ), 2), buf9, reinterpret_tensor(primals_8, (1, ), (1, ), 3), buf10, reinterpret_tensor(primals_6, (1, ), (1, ), 1), buf12, reinterpret_tensor(primals_8, (1, ), (1, ), 4), buf14, reinterpret_tensor(primals_8, (1, ), (1, ), 5), buf15, reinterpret_tensor(primals_8, (1, ), (1, ), 6), buf16, reinterpret_tensor(primals_6, (1, ), (1, ), 2), buf18, reinterpret_tensor(primals_8, (1, ), (1, ), 7), buf19, reinterpret_tensor(primals_8, (1, ), (1, ), 8), buf21, reinterpret_tensor(primals_6, (1, ), (1, ), 3), buf23, reinterpret_tensor(primals_8, (1, ), (1, ), 9), buf24, reinterpret_tensor(primals_6, (1, ), (1, ), 4), buf27, buf31, buf32, buf34, reinterpret_tensor(primals_10, (4, 8), (1, 4), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((5, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Optional
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from typing import Union
class SIMPA(nn.Module):
"""The signed mixed-path aggregation model.
Args:
hop (int): Number of hops to consider.
directed (bool, optional): Whether the input network is directed or not. (default: :obj:`False`)
"""
def __init__(self, hop: 'int', directed: 'bool'=False):
super(SIMPA, self).__init__()
self._hop_p = hop + 1
self._hop_n = int((1 + hop) * hop / 2)
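        # Count check (added annotation): one weight per positive hop gives
        # hop + 1 entries, while each first negative hop h leaves hop - h
        # positive continuations, summing to the triangular number
        # hop * (hop + 1) / 2 negative weights.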
self._undirected = not directed
if self._undirected:
self._w_p = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_n = Parameter(torch.FloatTensor(self._hop_n, 1))
self._reset_parameters_undirected()
else:
self._w_sp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_sn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._w_tp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_tn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._reset_parameters_directed()
def _reset_parameters_undirected(self):
self._w_p.data.fill_(1.0)
self._w_n.data.fill_(1.0)
def _reset_parameters_directed(self):
self._w_sp.data.fill_(1.0)
self._w_sn.data.fill_(1.0)
self._w_tp.data.fill_(1.0)
self._w_tn.data.fill_(1.0)
def forward(self, A_p:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', A_n:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', x_p:
'torch.FloatTensor', x_n: 'torch.FloatTensor', x_pt:
'Optional[torch.FloatTensor]'=None, x_nt:
'Optional[torch.FloatTensor]'=None, A_pt:
'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'=None,
A_nt: 'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'
        =None) ->torch.FloatTensor:
"""
Making a forward pass of SIMPA.
Arg types:
* **A_p** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized positive part of the adjacency matrix.
* **A_n** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized negative part of the adjacency matrix.
        * **x_p** (PyTorch FloatTensor) - Source positive hidden representations.
        * **x_n** (PyTorch FloatTensor) - Source negative hidden representations.
* **x_pt** (PyTorch FloatTensor, optional) - Target positive hidden representations. Default: None.
* **x_nt** (PyTorch FloatTensor, optional) - Target negative hidden representations. Default: None.
* **A_pt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
positive part of the adjacency matrix. Default: None.
* **A_nt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
negative part of the adjacency matrix. Default: None.
Return types:
* **feat** (PyTorch FloatTensor) - Embedding matrix, with shape (num_nodes, 2*input_dim) for undirected graphs
and (num_nodes, 4*input_dim) for directed graphs.
"""
if self._undirected:
feat_p = self._w_p[0] * x_p
feat_n = torch.zeros_like(feat_p)
curr_p = x_p.clone()
curr_n_aux = x_n.clone()
j = 0
for h in range(0, self._hop_p):
if h > 0:
curr_p = torch.matmul(A_p, curr_p)
curr_n_aux = torch.matmul(A_p, curr_n_aux)
feat_p += self._w_p[h] * curr_p
if h != self._hop_p - 1:
curr_n = torch.matmul(A_n, curr_n_aux)
feat_n += self._w_n[j] * curr_n
j += 1
for _ in range(self._hop_p - 2 - h):
curr_n = torch.matmul(A_p, curr_n)
feat_n += self._w_n[j] * curr_n
j += 1
feat = torch.cat([feat_p, feat_n], dim=1)
else:
A_sp = A_p
A_sn = A_n
A_tp = A_pt
A_tn = A_nt
x_sp = x_p
x_sn = x_n
feat_sp = self._w_sp[0] * x_sp
feat_sn = torch.zeros_like(feat_sp)
feat_tp = self._w_tp[0] * x_pt
feat_tn = torch.zeros_like(feat_tp)
curr_sp = x_sp.clone()
curr_sn_aux = x_sn.clone()
curr_tp = x_pt.clone()
curr_tn_aux = x_nt.clone()
j = 0
for h in range(0, self._hop_p):
if h > 0:
curr_sp = torch.matmul(A_sp, curr_sp)
curr_sn_aux = torch.matmul(A_sp, curr_sn_aux)
curr_tp = torch.matmul(A_tp, curr_tp)
curr_tn_aux = torch.matmul(A_tp, curr_tn_aux)
feat_sp += self._w_sp[h] * curr_sp
feat_tp += self._w_tp[h] * curr_tp
if h != self._hop_p - 1:
curr_sn = torch.matmul(A_sn, curr_sn_aux)
curr_tn = torch.matmul(A_tn, curr_tn_aux)
feat_sn += self._w_sn[j] * curr_sn
feat_tn += self._w_tn[j] * curr_tn
j += 1
for _ in range(self._hop_p - 2 - h):
curr_sn = torch.matmul(A_sp, curr_sn)
curr_tn = torch.matmul(A_tp, curr_tn)
feat_sn += self._w_sn[j] * curr_sn
feat_tn += self._w_tn[j] * curr_tn
j += 1
feat = torch.cat([feat_sp, feat_sn, feat_tp, feat_tn], dim=1)
return feat
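# Path bookkeeping sketch (hop=2, undirected; an illustration, not source
# code): feat_p = w_p[0]*x_p + w_p[1]*A_p@x_p + w_p[2]*A_p@A_p@x_p, while
# feat_n = w_n[0]*A_n@x_n + w_n[1]*A_p@A_n@x_n + w_n[2]*A_n@A_p@x_n --
# every walk of length <= hop that uses exactly one negative edge.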
class SSSNET(nn.Module):
"""The signed graph clustering model.
Args:
nfeat (int): Number of features.
hidden (int): Hidden dimensions of the initial MLP.
nclass (int): Number of clusters.
dropout (float): Dropout probability.
hop (int): Number of hops to consider.
directed (bool, optional): Whether the input network is directed or not. (default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
"""
def __init__(self, nfeat: 'int', hidden: 'int', nclass: 'int', dropout:
'float', hop: 'int', directed: 'bool'=False, bias: 'bool'=True):
super(SSSNET, self).__init__()
nh1 = hidden
nh2 = hidden
self._num_clusters = int(nclass)
self._simpa = SIMPA(hop, directed)
if bias:
self._bias = Parameter(torch.FloatTensor(self._num_clusters))
else:
self.register_parameter('_bias', None)
self._relu = nn.ReLU()
self._dropout = nn.Dropout(p=dropout)
self._undirected = not directed
if self._undirected:
self._w_p0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_p1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_n0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_n1 = Parameter(torch.FloatTensor(nh1, nh2))
self._W_prob = Parameter(torch.FloatTensor(2 * nh2, self.
_num_clusters))
self._reset_parameters_undirected()
else:
self._w_sp0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_sp1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_sn0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_sn1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_tp0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_tp1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_tn0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_tn1 = Parameter(torch.FloatTensor(nh1, nh2))
self._W_prob = Parameter(torch.FloatTensor(4 * nh2, self.
_num_clusters))
self._reset_parameters_directed()
def _reset_parameters_undirected(self):
nn.init.xavier_uniform_(self._w_p0, gain=1.414)
nn.init.xavier_uniform_(self._w_p1, gain=1.414)
nn.init.xavier_uniform_(self._w_n0, gain=1.414)
nn.init.xavier_uniform_(self._w_n1, gain=1.414)
if self._bias is not None:
self._bias.data.fill_(0.0)
nn.init.xavier_uniform_(self._W_prob, gain=1.414)
def _reset_parameters_directed(self):
nn.init.xavier_uniform_(self._w_sp0, gain=1.414)
nn.init.xavier_uniform_(self._w_sp1, gain=1.414)
nn.init.xavier_uniform_(self._w_sn0, gain=1.414)
nn.init.xavier_uniform_(self._w_sn1, gain=1.414)
nn.init.xavier_uniform_(self._w_tp0, gain=1.414)
nn.init.xavier_uniform_(self._w_tp1, gain=1.414)
nn.init.xavier_uniform_(self._w_tn0, gain=1.414)
nn.init.xavier_uniform_(self._w_tn1, gain=1.414)
if self._bias is not None:
self._bias.data.fill_(0.0)
nn.init.xavier_uniform_(self._W_prob, gain=1.414)
def forward(self, A_p:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', A_n:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', features:
'torch.FloatTensor', A_pt:
'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'=None,
A_nt: 'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'
=None) ->Tuple[torch.FloatTensor, torch.FloatTensor, torch.
LongTensor, torch.FloatTensor]:
"""
Making a forward pass of the SSSNET.
Arg types:
* **A_p** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized positive part of the adjacency matrix.
* **A_n** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized negative part of the adjacency matrix.
* **features** (PyTorch FloatTensor) - Input node features, with shape (num_nodes, num_features).
* **A_pt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
positive part of the adjacency matrix. Default: None.
* **A_nt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
negative part of the adjacency matrix. Default: None.
Return types:
* **z** (PyTorch FloatTensor) - Embedding matrix, with shape (num_nodes, 2*hidden) for undirected graphs
and (num_nodes, 4*hidden) for directed graphs.
* **output** (PyTorch FloatTensor) - Log of prob, with shape (num_nodes, num_clusters).
* **predictions_cluster** (PyTorch LongTensor) - Predicted labels.
* **prob** (PyTorch FloatTensor) - Probability assignment matrix of different clusters, with shape (num_nodes, num_clusters).
"""
if self._undirected:
x_p = torch.mm(features, self._w_p0)
x_p = self._relu(x_p)
x_p = self._dropout(x_p)
x_p = torch.mm(x_p, self._w_p1)
x_n = torch.mm(features, self._w_n0)
x_n = self._relu(x_n)
x_n = self._dropout(x_n)
x_n = torch.mm(x_n, self._w_n1)
z = self._simpa(A_p, A_n, x_p, x_n)
else:
x_sp = torch.mm(features, self._w_sp0)
x_sp = self._relu(x_sp)
x_sp = self._dropout(x_sp)
x_sp = torch.mm(x_sp, self._w_sp1)
x_sn = torch.mm(features, self._w_sn0)
x_sn = self._relu(x_sn)
x_sn = self._dropout(x_sn)
x_sn = torch.mm(x_sn, self._w_sn1)
x_tp = torch.mm(features, self._w_tp0)
x_tp = self._relu(x_tp)
x_tp = self._dropout(x_tp)
x_tp = torch.mm(x_tp, self._w_tp1)
x_tn = torch.mm(features, self._w_tn0)
x_tn = self._relu(x_tn)
x_tn = self._dropout(x_tn)
x_tn = torch.mm(x_tn, self._w_tn1)
z = self._simpa(A_p, A_n, x_sp, x_sn, x_tp, x_tn, A_pt, A_nt)
output = torch.mm(z, self._W_prob)
if self._bias is not None:
output = output + self._bias
predictions_cluster = torch.argmax(output, dim=1)
prob = F.softmax(output, dim=1)
output = F.log_softmax(output, dim=1)
return F.normalize(z), output, predictions_cluster, prob
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'hidden': 4, 'nclass': 4, 'dropout': 0.5,
'hop': 4}]
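# A minimal usage sketch (not part of the extracted source; assumes the eager
# module above is named SSSNET and uses the toy shapes from get_inputs()):
#
#     model = SSSNET(nfeat=4, hidden=4, nclass=4, dropout=0.5, hop=4)
#     A_p, A_n, feats = (torch.rand(4, 4) for _ in range(3))
#     z, log_prob, pred, prob = model(A_p, A_n, feats)
#     # undirected: z is (num_nodes, 2*hidden); prob rows sum to 1
#     assert z.shape == (4, 8) and torch.allclose(prob.sum(1), torch.ones(4))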
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from typing import Optional
from typing import Tuple
import torch.nn as nn
from torch.nn.parameter import Parameter
from typing import Union
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
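# Eager-mode equivalent of the kernel above (a sketch, not generated code):
# an in-place ReLU over the 16-element (4, 4) buffer, i.e. buf.clamp_(min=0).
# The hardcoded xnumel = 16 reflects the toy (4, 4) shapes from get_inputs().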
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 4
x2 = xindex // 4
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr0 + 1)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp9 = tl.load(in_ptr0 + 2)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp11 = tl.load(in_ptr3 + x0, xmask)
tmp14 = tl.load(in_ptr0 + 3)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp16 = tl.load(in_ptr4 + x0, xmask)
tmp19 = tl.load(in_ptr0 + 4)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp21 = tl.load(in_ptr5 + x0, xmask)
tmp24 = tl.load(in_ptr0 + 5)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK])
tmp26 = tl.load(in_ptr6 + x0, xmask)
tmp29 = tl.load(in_ptr0 + 6)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK])
tmp31 = tl.load(in_ptr7 + x0, xmask)
tmp34 = tl.load(in_ptr0 + 7)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp36 = tl.load(in_ptr8 + x0, xmask)
tmp39 = tl.load(in_ptr0 + 8)
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp41 = tl.load(in_ptr9 + x0, xmask)
tmp44 = tl.load(in_ptr0 + 9)
tmp45 = tl.broadcast_to(tmp44, [XBLOCK])
tmp46 = tl.load(in_ptr10 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp22 = tmp20 * tmp21
tmp23 = tmp18 + tmp22
tmp27 = tmp25 * tmp26
tmp28 = tmp23 + tmp27
tmp32 = tmp30 * tmp31
tmp33 = tmp28 + tmp32
tmp37 = tmp35 * tmp36
tmp38 = tmp33 + tmp37
tmp42 = tmp40 * tmp41
tmp43 = tmp38 + tmp42
tmp47 = tmp45 * tmp46
tmp48 = tmp43 + tmp47
tl.store(out_ptr0 + (x1 + 8 * x2), tmp48, xmask)
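# This kernel is the fused negative-path aggregation of SIMPA for hop=4: it
# multiplies the ten hop features (in_ptr1..in_ptr10) by the ten scalar
# weights in in_ptr0 (the (10, 1) parameter _w_n, since hop*(hop+1)/2 = 10)
# and sums them, writing the result into columns 4..7 of the (4, 8)
# concatenated embedding buffer. Roughly: feat_n = sum_j w_n[j] * curr_n_j.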
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr0 + 1)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp6 = tl.load(in_ptr2 + x2, xmask)
tmp9 = tl.load(in_ptr0 + 2)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp11 = tl.load(in_ptr3 + x2, xmask)
tmp14 = tl.load(in_ptr0 + 3)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp16 = tl.load(in_ptr4 + x2, xmask)
tmp19 = tl.load(in_ptr0 + 4)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp21 = tl.load(in_ptr5 + x2, xmask)
tmp3 = tmp1 * tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp22 = tmp20 * tmp21
tmp23 = tmp18 + tmp22
tl.store(out_ptr0 + (x0 + 8 * x1), tmp23, xmask)
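# Companion kernel for the positive paths: five hop features weighted by the
# (5, 1) parameter _w_p (hop + 1 = 5 terms) and summed into columns 0..3 of
# the same (4, 8) buffer. Roughly: feat_p = sum_h w_p[h] * curr_p_h.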
@triton.jit
def triton_poi_fused_argmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x0, tmp46, xmask)
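# Row-wise argmax over a (4, 4) logits matrix with NaN-aware comparisons
# (the x != x checks make NaN propagate the way torch.argmax does). The
# unassigned tl.where(tmp44, ...) above is dead code kept by the code
# generator. Eager equivalent (a sketch): pred = logits.argmax(dim=1).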
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
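# First half of a numerically stable softmax: subtract the row maximum from
# every element so the later exp() cannot overflow. Eager equivalent
# (a sketch): shifted = logits - logits.max(dim=1, keepdim=True).values.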
@triton.jit
def triton_poi_fused__log_softmax__softmax_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp3 + tmp5
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tmp1 / tmp12
tmp14 = tl_math.log(tmp12)
tmp15 = tmp0 - tmp14
tl.store(out_ptr0 + x2, tmp13, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
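# Second half: given the max-shifted logits x, this kernel shares one
# exp/row-sum between the two outputs -- softmax probabilities exp(x) / S
# into out_ptr0 and log-softmax x - log(S) into out_ptr1, where
# S = sum(exp(x)) over the row.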
@triton.jit
def triton_per_fused_div_linalg_vector_norm_6(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = 1e-12
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 8 * x0), tmp9, xmask)
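# Row-wise L2 normalization matching F.normalize(z) with the default eps:
# each 8-wide row is divided by max(||row||_2, 1e-12). The raw row norms are
# also written out (in_out_ptr0) and kept for the backward pass.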
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (5, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (10, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_4, out=buf3)
del primals_4
buf4 = buf3
del buf3
triton_poi_fused_relu_0[grid(16)](buf4, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, primals_5, out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_7, buf5, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf6, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf7, out=buf8)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf8, out=buf9)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf2, out=buf10)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf5, out=buf11)
buf12 = buf5
del buf5
extern_kernels.mm(primals_7, buf11, out=buf12)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf12, out=buf14)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf14, out=buf15)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf11, out=buf17)
buf18 = buf11
del buf11
extern_kernels.mm(primals_7, buf17, out=buf18)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf18, out=buf19)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf17, out=buf22)
buf23 = buf17
del buf17
extern_kernels.mm(primals_7, buf22, out=buf23)
buf27 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf26 = reinterpret_tensor(buf27, (4, 4), (8, 1), 4)
triton_poi_fused_add_mul_1[grid(16)](primals_8, buf6, buf7, buf8,
buf9, buf12, buf14, buf15, buf18, buf19, buf23, buf26, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf16 = buf22
del buf22
extern_kernels.mm(primals_9, buf10, out=buf16)
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf16, out=buf21)
buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf21, out=buf24)
buf25 = reinterpret_tensor(buf27, (4, 4), (8, 1), 0)
triton_poi_fused_add_mul_2[grid(16)](primals_6, buf2, buf10, buf16,
buf21, buf24, buf25, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, buf27, primals_10, alpha=1, beta=1,
out=buf28)
del primals_11
buf29 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_argmax_3[grid(4)](buf28, buf29, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf28, buf30, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf31 = buf28
del buf28
buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_5[grid(16)](buf30, buf31,
buf32, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf30
buf33 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf34 = reinterpret_tensor(buf33, (4, 1), (1, 1), 0)
del buf33
buf35 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_per_fused_div_linalg_vector_norm_6[grid(4)](buf34, buf27,
buf35, 4, 8, XBLOCK=1, num_warps=2, num_stages=1)
return buf35, buf32, buf29, buf31, buf1, buf2, buf4, reinterpret_tensor(
primals_6, (1,), (1,), 0), buf6, reinterpret_tensor(primals_8, (1,),
(1,), 0), buf7, reinterpret_tensor(primals_8, (1,), (1,), 1
), buf8, reinterpret_tensor(primals_8, (1,), (1,), 2
), buf9, reinterpret_tensor(primals_8, (1,), (1,), 3
), buf10, reinterpret_tensor(primals_6, (1,), (1,), 1
), buf12, reinterpret_tensor(primals_8, (1,), (1,), 4
), buf14, reinterpret_tensor(primals_8, (1,), (1,), 5
), buf15, reinterpret_tensor(primals_8, (1,), (1,), 6
), buf16, reinterpret_tensor(primals_6, (1,), (1,), 2
), buf18, reinterpret_tensor(primals_8, (1,), (1,), 7
), buf19, reinterpret_tensor(primals_8, (1,), (1,), 8
), buf21, reinterpret_tensor(primals_6, (1,), (1,), 3
), buf23, reinterpret_tensor(primals_8, (1,), (1,), 9
), buf24, reinterpret_tensor(primals_6, (1,), (1,), 4
), buf27, buf31, buf32, buf34, reinterpret_tensor(primals_10, (4, 8
), (1, 4), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0)
class SIMPA(nn.Module):
"""The signed mixed-path aggregation model.
Args:
hop (int): Number of hops to consider.
directed (bool, optional): Whether the input network is directed or not. (default: :obj:`False`)
"""
def __init__(self, hop: 'int', directed: 'bool'=False):
super(SIMPA, self).__init__()
self._hop_p = hop + 1
self._hop_n = int((1 + hop) * hop / 2)
self._undirected = not directed
if self._undirected:
self._w_p = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_n = Parameter(torch.FloatTensor(self._hop_n, 1))
self._reset_parameters_undirected()
else:
self._w_sp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_sn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._w_tp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_tn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._reset_parameters_directed()
def _reset_parameters_undirected(self):
self._w_p.data.fill_(1.0)
self._w_n.data.fill_(1.0)
def _reset_parameters_directed(self):
self._w_sp.data.fill_(1.0)
self._w_sn.data.fill_(1.0)
self._w_tp.data.fill_(1.0)
self._w_tn.data.fill_(1.0)
def forward(self, A_p:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', A_n:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', x_p:
'torch.FloatTensor', x_n: 'torch.FloatTensor', x_pt:
'Optional[torch.FloatTensor]'=None, x_nt:
'Optional[torch.FloatTensor]'=None, A_pt:
'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'=None,
A_nt: 'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'
        =None) ->torch.FloatTensor:
"""
Making a forward pass of SIMPA.
Arg types:
* **A_p** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized positive part of the adjacency matrix.
* **A_n** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized negative part of the adjacency matrix.
        * **x_p** (PyTorch FloatTensor) - Source positive hidden representations.
        * **x_n** (PyTorch FloatTensor) - Source negative hidden representations.
* **x_pt** (PyTorch FloatTensor, optional) - Target positive hidden representations. Default: None.
* **x_nt** (PyTorch FloatTensor, optional) - Target negative hidden representations. Default: None.
* **A_pt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
positive part of the adjacency matrix. Default: None.
* **A_nt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
negative part of the adjacency matrix. Default: None.
Return types:
* **feat** (PyTorch FloatTensor) - Embedding matrix, with shape (num_nodes, 2*input_dim) for undirected graphs
and (num_nodes, 4*input_dim) for directed graphs.
"""
if self._undirected:
feat_p = self._w_p[0] * x_p
feat_n = torch.zeros_like(feat_p)
curr_p = x_p.clone()
curr_n_aux = x_n.clone()
j = 0
for h in range(0, self._hop_p):
if h > 0:
curr_p = torch.matmul(A_p, curr_p)
curr_n_aux = torch.matmul(A_p, curr_n_aux)
feat_p += self._w_p[h] * curr_p
if h != self._hop_p - 1:
curr_n = torch.matmul(A_n, curr_n_aux)
feat_n += self._w_n[j] * curr_n
j += 1
for _ in range(self._hop_p - 2 - h):
curr_n = torch.matmul(A_p, curr_n)
feat_n += self._w_n[j] * curr_n
j += 1
feat = torch.cat([feat_p, feat_n], dim=1)
else:
A_sp = A_p
A_sn = A_n
A_tp = A_pt
A_tn = A_nt
x_sp = x_p
x_sn = x_n
feat_sp = self._w_sp[0] * x_sp
feat_sn = torch.zeros_like(feat_sp)
feat_tp = self._w_tp[0] * x_pt
feat_tn = torch.zeros_like(feat_tp)
curr_sp = x_sp.clone()
curr_sn_aux = x_sn.clone()
curr_tp = x_pt.clone()
curr_tn_aux = x_nt.clone()
j = 0
for h in range(0, self._hop_p):
if h > 0:
curr_sp = torch.matmul(A_sp, curr_sp)
curr_sn_aux = torch.matmul(A_sp, curr_sn_aux)
curr_tp = torch.matmul(A_tp, curr_tp)
curr_tn_aux = torch.matmul(A_tp, curr_tn_aux)
feat_sp += self._w_sp[h] * curr_sp
feat_tp += self._w_tp[h] * curr_tp
if h != self._hop_p - 1:
curr_sn = torch.matmul(A_sn, curr_sn_aux)
curr_tn = torch.matmul(A_tn, curr_tn_aux)
feat_sn += self._w_sn[j] * curr_sn
feat_tn += self._w_tn[j] * curr_tn
j += 1
for _ in range(self._hop_p - 2 - h):
curr_sn = torch.matmul(A_sp, curr_sn)
curr_tn = torch.matmul(A_tp, curr_tn)
feat_sn += self._w_sn[j] * curr_sn
feat_tn += self._w_tn[j] * curr_tn
j += 1
feat = torch.cat([feat_sp, feat_sn, feat_tp, feat_tn], dim=1)
return feat
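# A minimal sketch (illustrative only) of the hop bookkeeping used above:
# with hop = 4, SIMPA keeps hop + 1 = 5 positive-path weights and
# hop * (hop + 1) / 2 = 10 negative-path weights, one per mixed path that
# applies the negative adjacency exactly once.
#
#     hop = 4
#     hop_p = hop + 1                # 5  -> matches _w_p of shape (5, 1)
#     hop_n = (1 + hop) * hop // 2   # 10 -> matches _w_n of shape (10, 1)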
class SSSNETNew(nn.Module):
"""The signed graph clustering model.
Args:
nfeat (int): Number of features.
hidden (int): Hidden dimensions of the initial MLP.
nclass (int): Number of clusters.
dropout (float): Dropout probability.
hop (int): Number of hops to consider.
directed (bool, optional): Whether the input network is directed or not. (default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
"""
def __init__(self, nfeat: 'int', hidden: 'int', nclass: 'int', dropout:
'float', hop: 'int', directed: 'bool'=False, bias: 'bool'=True):
super(SSSNETNew, self).__init__()
nh1 = hidden
nh2 = hidden
self._num_clusters = int(nclass)
self._simpa = SIMPA(hop, directed)
if bias:
self._bias = Parameter(torch.FloatTensor(self._num_clusters))
else:
self.register_parameter('_bias', None)
self._relu = nn.ReLU()
self._dropout = nn.Dropout(p=dropout)
self._undirected = not directed
if self._undirected:
self._w_p0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_p1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_n0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_n1 = Parameter(torch.FloatTensor(nh1, nh2))
self._W_prob = Parameter(torch.FloatTensor(2 * nh2, self.
_num_clusters))
self._reset_parameters_undirected()
else:
self._w_sp0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_sp1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_sn0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_sn1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_tp0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_tp1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_tn0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_tn1 = Parameter(torch.FloatTensor(nh1, nh2))
self._W_prob = Parameter(torch.FloatTensor(4 * nh2, self.
_num_clusters))
self._reset_parameters_directed()
def _reset_parameters_undirected(self):
nn.init.xavier_uniform_(self._w_p0, gain=1.414)
nn.init.xavier_uniform_(self._w_p1, gain=1.414)
nn.init.xavier_uniform_(self._w_n0, gain=1.414)
nn.init.xavier_uniform_(self._w_n1, gain=1.414)
if self._bias is not None:
self._bias.data.fill_(0.0)
nn.init.xavier_uniform_(self._W_prob, gain=1.414)
def _reset_parameters_directed(self):
nn.init.xavier_uniform_(self._w_sp0, gain=1.414)
nn.init.xavier_uniform_(self._w_sp1, gain=1.414)
nn.init.xavier_uniform_(self._w_sn0, gain=1.414)
nn.init.xavier_uniform_(self._w_sn1, gain=1.414)
nn.init.xavier_uniform_(self._w_tp0, gain=1.414)
nn.init.xavier_uniform_(self._w_tp1, gain=1.414)
nn.init.xavier_uniform_(self._w_tn0, gain=1.414)
nn.init.xavier_uniform_(self._w_tn1, gain=1.414)
if self._bias is not None:
self._bias.data.fill_(0.0)
nn.init.xavier_uniform_(self._W_prob, gain=1.414)
def forward(self, input_0, input_1, input_2):
primals_11 = self._bias
primals_1 = self._w_p0
primals_2 = self._w_p1
primals_3 = self._w_n0
primals_4 = self._w_n1
primals_10 = self._W_prob
primals_6 = self._simpa._w_p
primals_8 = self._simpa._w_n
primals_5 = input_0
primals_7 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1], output[2], output[3]
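# A hedged smoke test for the compiled module (illustrative only; requires a
# CUDA device and the toy shapes from get_inputs()):
#
#     m = SSSNETNew(nfeat=4, hidden=4, nclass=4, dropout=0.5, hop=4).cuda()
#     inputs = [torch.rand(4, 4, device='cuda') for _ in range(3)]
#     z, log_prob, pred, prob = m(*inputs)
#     # mirrors the eager SSSNET.forward return signature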
| SherylHYX/SSSNET_Signed_Clustering | SSSNET | false | 17,925 | ["MIT"] | 5 | 85736c18e86b396d64177d22b8c7f9859dfd794c | https://github.com/SherylHYX/SSSNET_Signed_Clustering/tree/85736c18e86b396d64177d22b8c7f9859dfd794c |
DenseNet_conv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/oc/coc7ychloxbwvdngqu6bharipzfn4z4jq7yscuexym45tudkmm5a.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# h => cat
# Graph fragment:
# %cat : [num_users=6] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 6
x0 = xindex % 16
x2 = (xindex // 96)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (32*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 6, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-2) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
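# Eager equivalent (a sketch): h = torch.cat([a, b], dim=1), stacking the
# (4, 2, 4, 4) and (4, 4, 4, 4) inputs into a (4, 6, 4, 4) buffer. The
# channel coordinate x1 is compared against the bounds 2 and 6 to pick the
# source tensor for each output element.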
# kernel path: runs/run_shard_2/inductor_cache/4j/c4jmm7z6i2rhzh6zlgdqwtfp6weoy7srpsbdxrcse4kl5sufztwf.py
# Topologically Sorted Source Nodes: [h_1, h_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# h_1 => convolution
# h_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 48
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
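# Fused epilogue for the 1x1 bottleneck convolutions: adds the per-channel
# bias (48 channels) and applies ReLU in place on the conv output produced
# by extern_kernels.convolution (which is called with bias=None). Eager
# sketch: h = relu(conv1x1(h) + bias).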
# kernel path: runs/run_shard_2/inductor_cache/uc/cucbyfky4n3djftgxjgfhv6ewn6qbgdkldpnsie5fjgvtfjocqj2.py
# Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# h_5 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %relu_1], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 18
x0 = xindex % 16
x2 = (xindex // 288)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 6, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (96*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 18, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-6) + x1)) + (192*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-6) + x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp6, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
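# Dense connectivity fused into one kernel: the first 6 output channels copy
# the running concat buffer, while channels 6..17 are the newest branch's
# 3x3 conv output with its bias add and ReLU folded in. Eager sketch:
# h = torch.cat([h, relu(conv3x3(h_bottleneck) + bias)], dim=1). The
# cat_3/cat_4/cat_5 kernels below extend the same pattern as the block grows
# to 30, 42 and 54 channels (growth rate 12).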
# kernel path: runs/run_shard_2/inductor_cache/e5/ce5fpwsvci4fwpvtygte43vpdkxluvjg5xpj4y27fv4xgnkrv2ev.py
# Topologically Sorted Source Nodes: [h_10], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# h_10 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %relu_1, %relu_3], 1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 30
x0 = xindex % 16
x2 = (xindex // 480)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 6, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (96*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 18, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-6) + x1)) + (192*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-6) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 0, tl.int32)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tmp0 >= tmp7
tmp18 = tl.full([1], 30, tl.int64)
tmp19 = tmp0 < tmp18
tmp20 = tl.load(in_ptr3 + (x0 + (16*((-18) + x1)) + (192*x2)), tmp17 & xmask, other=0.0)
tmp21 = tl.load(in_ptr4 + ((-18) + x1), tmp17 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = triton_helpers.maximum(tmp13, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp17, tmp23, tmp24)
tmp26 = tl.where(tmp9, tmp16, tmp25)
tmp27 = tl.where(tmp4, tmp5, tmp26)
tl.store(out_ptr0 + (x3), tmp27, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/7o/c7ojwsh6u7nuwmggyizakrdaw47xpi65lfcmwcj7hq4ipwjiqbat.py
# Topologically Sorted Source Nodes: [h_15], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# h_15 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %relu_1, %relu_3, %relu_5], 1), kwargs = {})
triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2688
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 42
x0 = xindex % 16
x2 = (xindex // 672)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 6, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (96*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 18, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-6) + x1)) + (192*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-6) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 0, tl.int32)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tmp0 >= tmp7
tmp18 = tl.full([1], 30, tl.int64)
tmp19 = tmp0 < tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr3 + (x0 + (16*((-18) + x1)) + (192*x2)), tmp20 & xmask, other=0.0)
tmp22 = tl.load(in_ptr4 + ((-18) + x1), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = triton_helpers.maximum(tmp13, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp20, tmp24, tmp25)
tmp27 = tmp0 >= tmp18
tmp28 = tl.full([1], 42, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tl.load(in_ptr5 + (x0 + (16*((-30) + x1)) + (192*x2)), tmp27 & xmask, other=0.0)
tmp31 = tl.load(in_ptr6 + ((-30) + x1), tmp27 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp30 + tmp31
tmp33 = triton_helpers.maximum(tmp13, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp27, tmp33, tmp34)
tmp36 = tl.where(tmp20, tmp26, tmp35)
tmp37 = tl.where(tmp9, tmp16, tmp36)
tmp38 = tl.where(tmp4, tmp5, tmp37)
tl.store(out_ptr0 + (x3), tmp38, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ik/cikbggtjvc2lac5ryikkabdocsx7hvm6z36zds6luvth2d2k2kzz.py
# Topologically Sorted Source Nodes: [h_20], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# h_20 => cat_4
# Graph fragment:
# %cat_4 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %relu_1, %relu_3, %relu_5, %relu_7], 1), kwargs = {})
triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 54
x0 = xindex % 16
x2 = (xindex // 864)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 6, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (96*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 18, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-6) + x1)) + (192*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-6) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 0, tl.int32)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tmp0 >= tmp7
tmp18 = tl.full([1], 30, tl.int64)
tmp19 = tmp0 < tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr3 + (x0 + (16*((-18) + x1)) + (192*x2)), tmp20 & xmask, other=0.0)
tmp22 = tl.load(in_ptr4 + ((-18) + x1), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = triton_helpers.maximum(tmp13, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp20, tmp24, tmp25)
tmp27 = tmp0 >= tmp18
tmp28 = tl.full([1], 42, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr5 + (x0 + (16*((-30) + x1)) + (192*x2)), tmp30 & xmask, other=0.0)
tmp32 = tl.load(in_ptr6 + ((-30) + x1), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp13, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp30, tmp34, tmp35)
tmp37 = tmp0 >= tmp28
tmp38 = tl.full([1], 54, tl.int64)
tmp39 = tmp0 < tmp38
tmp40 = tl.load(in_ptr7 + (x0 + (16*((-42) + x1)) + (192*x2)), tmp37 & xmask, other=0.0)
tmp41 = tl.load(in_ptr8 + ((-42) + x1), tmp37 & xmask, eviction_policy='evict_last', other=0.0)
tmp42 = tmp40 + tmp41
tmp43 = triton_helpers.maximum(tmp13, tmp42)
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp37, tmp43, tmp44)
tmp46 = tl.where(tmp30, tmp36, tmp45)
tmp47 = tl.where(tmp20, tmp26, tmp46)
tmp48 = tl.where(tmp9, tmp16, tmp47)
tmp49 = tl.where(tmp4, tmp5, tmp48)
tl.store(out_ptr0 + (x3), tmp49, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/in/cinn2kx6mcnzindp3pzcqqbrquffjq2qhepkb2lovwx56fxirbpe.py
# Topologically Sorted Source Nodes: [h_23, h_24], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h_23 => convolution_9
# h_24 => relu_9
# Graph fragment:
# %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_8, %primals_21, %primals_22, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_9,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_9, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_6(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 12
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
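# Same bias-add + ReLU fusion as before, but this kernel additionally emits
# the boolean mask (result <= 0) that aten.threshold_backward consumes when
# computing the ReLU gradient for the final layer.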
# kernel path: runs/run_shard_2/inductor_cache/ty/ctynlltnfcpbo7rliw77hc5rck67mxcypnkf4jqrwhiyqsrifjnl.py
# Topologically Sorted Source Nodes: [h_18, h_19], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h_18 => convolution_7
# h_19 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_6, %primals_17, %primals_18, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 12
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 4, 4), (32, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (48, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_4, (48, ), (1, ))
assert_size_stride(primals_5, (12, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_6, (12, ), (1, ))
assert_size_stride(primals_7, (48, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_8, (48, ), (1, ))
assert_size_stride(primals_9, (12, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_10, (12, ), (1, ))
assert_size_stride(primals_11, (48, 30, 1, 1), (30, 1, 1, 1))
assert_size_stride(primals_12, (48, ), (1, ))
assert_size_stride(primals_13, (12, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_14, (12, ), (1, ))
assert_size_stride(primals_15, (48, 42, 1, 1), (42, 1, 1, 1))
assert_size_stride(primals_16, (48, ), (1, ))
assert_size_stride(primals_17, (12, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_18, (12, ), (1, ))
assert_size_stride(primals_19, (48, 54, 1, 1), (54, 1, 1, 1))
assert_size_stride(primals_20, (48, ), (1, ))
assert_size_stride(primals_21, (12, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_22, (12, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 384, grid=grid(384), stream=stream0)
del primals_1
del primals_2
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 48, 4, 4), (768, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [h_1, h_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_4, 3072, grid=grid(3072), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [h_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 12, 4, 4), (192, 16, 4, 1))
buf4 = empty_strided_cuda((4, 18, 4, 4), (288, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf0, buf3, primals_6, buf4, 1152, grid=grid(1152), stream=stream0)
# Topologically Sorted Source Nodes: [h_6], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 48, 4, 4), (768, 16, 4, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [h_6, h_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf6, primals_8, 3072, grid=grid(3072), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [h_8], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 12, 4, 4), (192, 16, 4, 1))
buf8 = empty_strided_cuda((4, 30, 4, 4), (480, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_10], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf0, buf3, primals_6, buf7, primals_10, buf8, 1920, grid=grid(1920), stream=stream0)
# Topologically Sorted Source Nodes: [h_11], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 48, 4, 4), (768, 16, 4, 1))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [h_11, h_12], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf10, primals_12, 3072, grid=grid(3072), stream=stream0)
del primals_12
# Topologically Sorted Source Nodes: [h_13], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 12, 4, 4), (192, 16, 4, 1))
buf12 = empty_strided_cuda((4, 42, 4, 4), (672, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_15], Original ATen: [aten.cat]
triton_poi_fused_cat_4.run(buf0, buf3, primals_6, buf7, primals_10, buf11, primals_14, buf12, 2688, grid=grid(2688), stream=stream0)
# Topologically Sorted Source Nodes: [h_16], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_15, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 48, 4, 4), (768, 16, 4, 1))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [h_16, h_17], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf14, primals_16, 3072, grid=grid(3072), stream=stream0)
del primals_16
# Topologically Sorted Source Nodes: [h_18], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 12, 4, 4), (192, 16, 4, 1))
buf16 = empty_strided_cuda((4, 54, 4, 4), (864, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_20], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf0, buf3, primals_6, buf7, primals_10, buf11, primals_14, buf15, primals_18, buf16, 3456, grid=grid(3456), stream=stream0)
# Topologically Sorted Source Nodes: [h_21], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf16, primals_19, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 48, 4, 4), (768, 16, 4, 1))
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [h_21, h_22], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf18, primals_20, 3072, grid=grid(3072), stream=stream0)
del primals_20
# Topologically Sorted Source Nodes: [h_23], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf18, primals_21, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 12, 4, 4), (192, 16, 4, 1))
buf20 = buf19; del buf19 # reuse
buf21 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_23, h_24], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_6.run(buf20, primals_22, buf21, 768, grid=grid(768), stream=stream0)
del primals_22
buf22 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_18, h_19], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_7.run(buf15, primals_18, buf22, 768, grid=grid(768), stream=stream0)
del buf15
del primals_18
buf23 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_13, h_14], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_7.run(buf11, primals_14, buf23, 768, grid=grid(768), stream=stream0)
del buf11
del primals_14
buf24 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_8, h_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_7.run(buf7, primals_10, buf24, 768, grid=grid(768), stream=stream0)
del buf7
del primals_10
buf25 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_3, h_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_7.run(buf3, primals_6, buf25, 768, grid=grid(768), stream=stream0)
del buf3
del primals_6
return (buf20, primals_3, primals_5, primals_7, primals_9, primals_11, primals_13, primals_15, primals_17, primals_19, primals_21, buf0, buf2, buf4, buf6, buf8, buf10, buf12, buf14, buf16, buf18, buf21, buf22, buf23, buf24, buf25, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 2, 4, 4), (32, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((48, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, 48, 3, 3), (432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((48, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((12, 48, 3, 3), (432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((48, 30, 1, 1), (30, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((12, 48, 3, 3), (432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((48, 42, 1, 1), (42, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((12, 48, 3, 3), (432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((48, 54, 1, 1), (54, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((12, 48, 3, 3), (432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def xavier_init(module, gain=1, bias=0, distribution='normal'):
assert distribution in ['uniform', 'normal']
if distribution == 'uniform':
nn.init.xavier_uniform_(module.weight, gain=gain)
else:
nn.init.xavier_normal_(module.weight, gain=gain)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
class DenseNet_conv(nn.Module):
"""
doc
"""
def __init__(self, in_c, L=5, k=12, bn=False):
"""
dense block
:param in_c: input channel number
:param L: layer number in dense block
:param k: output channels of each layer in dense block
:param bn: using bn or not
"""
super(DenseNet_conv, self).__init__()
self.L = L
self.k = k
self.bn = bn
self.conv1s = []
self.conv2s = []
self.bn1s = []
self.bn2s = []
for i in range(self.L):
channel_in = i * self.k + in_c + 2
conv1 = nn.Conv2d(channel_in, self.k * 4, kernel_size=1, stride=1)
setattr(self, 'conv1_%i' % i, conv1)
xavier_init(conv1)
self.conv1s.append(conv1)
if self.bn:
bn1 = nn.BatchNorm2d(num_features=self.k * 4)
setattr(self, 'bn1_%i' % i, bn1)
self.bn1s.append(bn1)
conv2 = nn.Conv2d(self.k * 4, self.k, kernel_size=3, stride=1,
padding=1)
setattr(self, 'conv2_%i' % i, conv2)
xavier_init(conv2)
self.conv2s.append(conv2)
if self.bn:
bn2 = nn.BatchNorm2d(num_features=self.k)
setattr(self, 'bn2_%i' % i, bn2)
self.bn2s.append(bn2)
def forward(self, x, sparse_inputs):
"""
dense block
:param x: x
:param sparse_inputs: sparse image (s1,s2), 2 channels
:return:
"""
hs = []
h = torch.cat((x, sparse_inputs), 1)
hs.append(h)
for i in range(self.L):
if i != 0:
h = torch.cat(hs, 1)
h = self.conv1s[i](h)
if self.bn:
h = self.bn1s[i](h)
h = torch.relu(h)
h = self.conv2s[i](h)
if self.bn:
h = self.bn2s[i](h)
h = torch.relu(h)
if i != self.L - 1:
hs.append(h)
return h
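# Hedged sketch (not part of the original repo): each dense layer's k-channel
# output is concatenated onto hs, so the 1x1 conv at layer i sees
# i * k + in_c + 2 input channels. With in_c=4, k=12 that gives 6, 18, 30, 42,
# 54, matching the conv1 weight shapes (48, 6, 1, 1) ... (48, 54, 1, 1) above.
def _dense_block_channel_schedule(in_c=4, L=5, k=12):
    # hypothetical helper name; returns the per-layer input channel counts
    return [i * k + in_c + 2 for i in range(L)]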
def get_inputs():
return [torch.rand([4, 2, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_c': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 6
x0 = xindex % 16
x2 = xindex // 96
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 32 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 6, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-2 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 48
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 18
x0 = xindex % 16
x2 = xindex // 288
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 6, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 18, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-6 + x1) + 192 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-6 + x1), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp6, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 30
x0 = xindex % 16
x2 = xindex // 480
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 6, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 18, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-6 + x1) + 192 * x2), tmp9 &
xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + (-6 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 0, tl.int32)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tmp0 >= tmp7
tl.full([1], 30, tl.int64)
tmp20 = tl.load(in_ptr3 + (x0 + 16 * (-18 + x1) + 192 * x2), tmp17 &
xmask, other=0.0)
tmp21 = tl.load(in_ptr4 + (-18 + x1), tmp17 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = triton_helpers.maximum(tmp13, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp17, tmp23, tmp24)
tmp26 = tl.where(tmp9, tmp16, tmp25)
tmp27 = tl.where(tmp4, tmp5, tmp26)
tl.store(out_ptr0 + x3, tmp27, xmask)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 2688
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 42
x0 = xindex % 16
x2 = xindex // 672
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 6, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 18, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-6 + x1) + 192 * x2), tmp9 &
xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + (-6 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 0, tl.int32)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tmp0 >= tmp7
tmp18 = tl.full([1], 30, tl.int64)
tmp19 = tmp0 < tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr3 + (x0 + 16 * (-18 + x1) + 192 * x2), tmp20 &
xmask, other=0.0)
tmp22 = tl.load(in_ptr4 + (-18 + x1), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = triton_helpers.maximum(tmp13, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp20, tmp24, tmp25)
tmp27 = tmp0 >= tmp18
tl.full([1], 42, tl.int64)
tmp30 = tl.load(in_ptr5 + (x0 + 16 * (-30 + x1) + 192 * x2), tmp27 &
xmask, other=0.0)
tmp31 = tl.load(in_ptr6 + (-30 + x1), tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tmp30 + tmp31
tmp33 = triton_helpers.maximum(tmp13, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp27, tmp33, tmp34)
tmp36 = tl.where(tmp20, tmp26, tmp35)
tmp37 = tl.where(tmp9, tmp16, tmp36)
tmp38 = tl.where(tmp4, tmp5, tmp37)
tl.store(out_ptr0 + x3, tmp38, xmask)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 3456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 54
x0 = xindex % 16
x2 = xindex // 864
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 6, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 18, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-6 + x1) + 192 * x2), tmp9 &
xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + (-6 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 0, tl.int32)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tmp0 >= tmp7
tmp18 = tl.full([1], 30, tl.int64)
tmp19 = tmp0 < tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr3 + (x0 + 16 * (-18 + x1) + 192 * x2), tmp20 &
xmask, other=0.0)
tmp22 = tl.load(in_ptr4 + (-18 + x1), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = triton_helpers.maximum(tmp13, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp20, tmp24, tmp25)
tmp27 = tmp0 >= tmp18
tmp28 = tl.full([1], 42, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr5 + (x0 + 16 * (-30 + x1) + 192 * x2), tmp30 &
xmask, other=0.0)
tmp32 = tl.load(in_ptr6 + (-30 + x1), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp13, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp30, tmp34, tmp35)
tmp37 = tmp0 >= tmp28
tl.full([1], 54, tl.int64)
tmp40 = tl.load(in_ptr7 + (x0 + 16 * (-42 + x1) + 192 * x2), tmp37 &
xmask, other=0.0)
tmp41 = tl.load(in_ptr8 + (-42 + x1), tmp37 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp42 = tmp40 + tmp41
tmp43 = triton_helpers.maximum(tmp13, tmp42)
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp37, tmp43, tmp44)
tmp46 = tl.where(tmp30, tmp36, tmp45)
tmp47 = tl.where(tmp20, tmp26, tmp46)
tmp48 = tl.where(tmp9, tmp16, tmp47)
tmp49 = tl.where(tmp4, tmp5, tmp48)
tl.store(out_ptr0 + x3, tmp49, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_6(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 12
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 12
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22) = args
args.clear()
assert_size_stride(primals_1, (4, 2, 4, 4), (32, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (48, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_4, (48,), (1,))
assert_size_stride(primals_5, (12, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_6, (12,), (1,))
assert_size_stride(primals_7, (48, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_8, (48,), (1,))
assert_size_stride(primals_9, (12, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_10, (12,), (1,))
assert_size_stride(primals_11, (48, 30, 1, 1), (30, 1, 1, 1))
assert_size_stride(primals_12, (48,), (1,))
assert_size_stride(primals_13, (12, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_14, (12,), (1,))
assert_size_stride(primals_15, (48, 42, 1, 1), (42, 1, 1, 1))
assert_size_stride(primals_16, (48,), (1,))
assert_size_stride(primals_17, (12, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_18, (12,), (1,))
assert_size_stride(primals_19, (48, 54, 1, 1), (54, 1, 1, 1))
assert_size_stride(primals_20, (48,), (1,))
assert_size_stride(primals_21, (12, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_22, (12,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(384)](primals_1, primals_2, buf0, 384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 48, 4, 4), (768, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(3072)](buf2, primals_4,
3072, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 12, 4, 4), (192, 16, 4, 1))
buf4 = empty_strided_cuda((4, 18, 4, 4), (288, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_2[grid(1152)](buf0, buf3, primals_6, buf4,
1152, XBLOCK=128, num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_7, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 48, 4, 4), (768, 16, 4, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_1[grid(3072)](buf6, primals_8,
3072, XBLOCK=128, num_warps=4, num_stages=1)
del primals_8
buf7 = extern_kernels.convolution(buf6, primals_9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 12, 4, 4), (192, 16, 4, 1))
buf8 = empty_strided_cuda((4, 30, 4, 4), (480, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_3[grid(1920)](buf0, buf3, primals_6, buf7,
primals_10, buf8, 1920, XBLOCK=256, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf8, primals_11, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 48, 4, 4), (768, 16, 4, 1))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_1[grid(3072)](buf10, primals_12,
3072, XBLOCK=128, num_warps=4, num_stages=1)
del primals_12
buf11 = extern_kernels.convolution(buf10, primals_13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 12, 4, 4), (192, 16, 4, 1))
buf12 = empty_strided_cuda((4, 42, 4, 4), (672, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_4[grid(2688)](buf0, buf3, primals_6, buf7,
primals_10, buf11, primals_14, buf12, 2688, XBLOCK=128,
num_warps=4, num_stages=1)
buf13 = extern_kernels.convolution(buf12, primals_15, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 48, 4, 4), (768, 16, 4, 1))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_1[grid(3072)](buf14, primals_16,
3072, XBLOCK=128, num_warps=4, num_stages=1)
del primals_16
buf15 = extern_kernels.convolution(buf14, primals_17, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 12, 4, 4), (192, 16, 4, 1))
buf16 = empty_strided_cuda((4, 54, 4, 4), (864, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_5[grid(3456)](buf0, buf3, primals_6, buf7,
primals_10, buf11, primals_14, buf15, primals_18, buf16, 3456,
XBLOCK=128, num_warps=4, num_stages=1)
buf17 = extern_kernels.convolution(buf16, primals_19, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 48, 4, 4), (768, 16, 4, 1))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_1[grid(3072)](buf18, primals_20,
3072, XBLOCK=128, num_warps=4, num_stages=1)
del primals_20
buf19 = extern_kernels.convolution(buf18, primals_21, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 12, 4, 4), (192, 16, 4, 1))
buf20 = buf19
del buf19
buf21 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(768)](buf20
, primals_22, buf21, 768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_22
buf22 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(768)](buf15
, primals_18, buf22, 768, XBLOCK=128, num_warps=4, num_stages=1)
del buf15
del primals_18
buf23 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(768)](buf11
, primals_14, buf23, 768, XBLOCK=128, num_warps=4, num_stages=1)
del buf11
del primals_14
buf24 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(768)](buf7,
primals_10, buf24, 768, XBLOCK=128, num_warps=4, num_stages=1)
del buf7
del primals_10
buf25 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(768)](buf3,
primals_6, buf25, 768, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_6
return (buf20, primals_3, primals_5, primals_7, primals_9, primals_11,
primals_13, primals_15, primals_17, primals_19, primals_21, buf0,
buf2, buf4, buf6, buf8, buf10, buf12, buf14, buf16, buf18, buf21,
buf22, buf23, buf24, buf25)
def xavier_init(module, gain=1, bias=0, distribution='normal'):
assert distribution in ['uniform', 'normal']
if distribution == 'uniform':
nn.init.xavier_uniform_(module.weight, gain=gain)
else:
nn.init.xavier_normal_(module.weight, gain=gain)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
class DenseNet_convNew(nn.Module):
"""
doc
"""
def __init__(self, in_c, L=5, k=12, bn=False):
"""
dense block
:param in_c: input channel number
:param L: layer number in dense block
:param k: output channels of each layer in dense block
:param bn: using bn or not
"""
super(DenseNet_convNew, self).__init__()
self.L = L
self.k = k
self.bn = bn
self.conv1s = []
self.conv2s = []
self.bn1s = []
self.bn2s = []
for i in range(self.L):
channel_in = i * self.k + in_c + 2
conv1 = nn.Conv2d(channel_in, self.k * 4, kernel_size=1, stride=1)
setattr(self, 'conv1_%i' % i, conv1)
xavier_init(conv1)
self.conv1s.append(conv1)
if self.bn:
bn1 = nn.BatchNorm2d(num_features=self.k * 4)
setattr(self, 'bn1_%i' % i, bn1)
self.bn1s.append(bn1)
conv2 = nn.Conv2d(self.k * 4, self.k, kernel_size=3, stride=1,
padding=1)
setattr(self, 'conv2_%i' % i, conv2)
xavier_init(conv2)
self.conv2s.append(conv2)
if self.bn:
bn2 = nn.BatchNorm2d(num_features=self.k)
setattr(self, 'bn2_%i' % i, bn2)
self.bn2s.append(bn2)
def forward(self, input_0, input_1):
primals_3 = self.conv1_0.weight
primals_4 = self.conv1_0.bias
primals_5 = self.conv2_0.weight
primals_6 = self.conv2_0.bias
primals_7 = self.conv1_1.weight
primals_8 = self.conv1_1.bias
primals_9 = self.conv2_1.weight
primals_10 = self.conv2_1.bias
primals_11 = self.conv1_2.weight
primals_12 = self.conv1_2.bias
primals_13 = self.conv2_2.weight
primals_14 = self.conv2_2.bias
primals_15 = self.conv1_3.weight
primals_16 = self.conv1_3.bias
primals_17 = self.conv2_3.weight
primals_18 = self.conv2_3.bias
primals_19 = self.conv1_4.weight
primals_20 = self.conv1_4.bias
primals_21 = self.conv2_4.weight
primals_22 = self.conv2_4.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22])
return output[0]
| Shiaoming/DensefromRGBS | DenseNet_conv | false | 17,926 | ["MIT"] | 7 | d69f5f60c5512da876b002a2007ec42d4a3fbb8e | https://github.com/Shiaoming/DensefromRGBS/tree/d69f5f60c5512da876b002a2007ec42d4a3fbb8e |
TripletLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/5d/c5dtcurhbtazyithtpfbdetxra6jy6ksi52gzmuuc2ffkwgnlw4w.py
# Topologically Sorted Source Nodes: [sub, pow_1, distance_positive, sub_1, pow_2, distance_negative, sub_2, add, losses, loss], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.relu, aten.mean]
# Source node to ATen node mapping:
# add => add
# distance_negative => sum_2
# distance_positive => sum_1
# loss => mean
# losses => relu
# pow_1 => pow_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg2_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1]), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 4), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%relu,), kwargs = {})
triton_per_fused_add_mean_pow_relu_sub_sum_0 = async_compile.triton('triton_per_fused_add_mean_pow_relu_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_relu_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp19 = tl.load(in_ptr2 + (r0 + (64*r1)), None)
tmp22 = tl.load(in_ptr2 + (16 + r0 + (64*r1)), None)
tmp26 = tl.load(in_ptr2 + (32 + r0 + (64*r1)), None)
tmp30 = tl.load(in_ptr2 + (48 + r0 + (64*r1)), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp20 = tmp0 - tmp19
tmp21 = tmp20 * tmp20
tmp23 = tmp4 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tmp21 + tmp24
tmp27 = tmp9 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp25 + tmp28
tmp31 = tmp14 - tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp29 + tmp32
tmp34 = tmp18 - tmp33
tmp35 = 4.0
tmp36 = tmp34 + tmp35
tmp37 = tl.full([1, 1], 0, tl.int32)
tmp38 = triton_helpers.maximum(tmp37, tmp36)
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp43, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, distance_positive, sub_1, pow_2, distance_negative, sub_2, add, losses, loss], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.relu, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_pow_relu_sub_sum_0.run(buf2, arg0_1, arg1_1, arg2_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class TripletLoss(nn.Module):
"""
Triplet loss
Takes embeddings [N*dim_embed] of an anchor sample, a positive sample and a negative sample
"""
def __init__(self, margin):
super(TripletLoss, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative, size_average=True):
distance_positive = (anchor - positive).pow(2).sum(1)
distance_negative = (anchor - negative).pow(2).sum(1)
losses = F.relu(distance_positive - distance_negative + self.margin)
if size_average:
loss = losses.mean()
else:
loss = losses.sum()
return loss
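# Hedged worked example (illustrative, not from the original repo): with
# margin=4 the loss is mean(relu(d(a, p) - d(a, n) + 4)). If d(a, p) = 1.0 and
# d(a, n) = 6.0, the per-sample loss is relu(1.0 - 6.0 + 4.0) = 0.0, i.e. the
# negative already sits at least margin farther from the anchor.
def _triplet_loss_reference(anchor, positive, negative, margin=4.0):
    # hypothetical helper; plain-PyTorch restatement of the fused kernel
    d_pos = (anchor - positive).pow(2).sum(1)
    d_neg = (anchor - negative).pow(2).sum(1)
    return torch.relu(d_pos - d_neg + margin).mean()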
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr2 + (r0 + 64 * r1), None)
tmp22 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None)
tmp26 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None)
tmp30 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp20 = tmp0 - tmp19
tmp21 = tmp20 * tmp20
tmp23 = tmp4 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tmp21 + tmp24
tmp27 = tmp9 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp25 + tmp28
tmp31 = tmp14 - tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp29 + tmp32
tmp34 = tmp18 - tmp33
tmp35 = 4.0
tmp36 = tmp34 + tmp35
tmp37 = tl.full([1, 1], 0, tl.int32)
tmp38 = triton_helpers.maximum(tmp37, tmp36)
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_mean_pow_relu_sub_sum_0[grid(1)](buf2, arg0_1,
arg1_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class TripletLossNew(nn.Module):
"""
Triplet loss
Takes embeddings [N*dim_embed] of an anchor sample, a positive sample and a negative sample
"""
def __init__(self, margin):
super(TripletLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| Sigma10010/nuclei_cells_det | TripletLoss | false | 17,927 | ["MIT"] | 4 | c074175fec8938472bb4cddabd83d1d0ea78f230 | https://github.com/Sigma10010/nuclei_cells_det/tree/c074175fec8938472bb4cddabd83d1d0ea78f230 |
CPULayerNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/dp/cdpg7pr5bgrycbjwdpax25ogal6yhivibai2mh5orecczmpc33sn.py
# Topologically Sorted Source Nodes: [mean, sub, std, add, truediv, mul, add_1], Original ATen: [aten.mean, aten.sub, aten.std, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mean => mean
# mul => mul
# std => sqrt, var
# sub => sub
# truediv => div
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%arg0_1, [-1]), kwargs = {correction: 1.0, keepdim: True})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %arg2_1), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = 3.0
tmp24 = tmp22 / tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = 1e-06
tmp27 = tmp25 + tmp26
tmp28 = tmp11 / tmp27
tmp29 = tmp0 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, sub, std, add, truediv, mul, add_1], Original ATen: [aten.mean, aten.sub, aten.std, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0.run(arg1_1, arg0_1, arg2_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CPULayerNorm(nn.Module):
def __init__(self, features, eps=1e-06):
super().__init__()
self.features = features
self.eps = eps
def forward(self, x, gamma, beta):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return gamma * ((x - mean) / (std + self.eps)) + beta
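# Hedged note (illustrative, not from the original repo): this module uses
# torch.Tensor.std's unbiased default (divide by n - 1), which is why the
# fused kernel divides the summed squared deviations by 3.0 for a last
# dimension of size 4 before taking the square root.
def _cpulayernorm_reference(x, gamma, beta, eps=1e-06):
    # hypothetical helper restating forward() for comparison with the kernel
    mean = x.mean(-1, keepdim=True)
    std = x.std(-1, keepdim=True)  # unbiased sample std
    return gamma * ((x - mean) / (std + eps)) + beta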
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'features': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = 3.0
tmp24 = tmp22 / tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = 1e-06
tmp27 = tmp25 + tmp26
tmp28 = tmp11 / tmp27
tmp29 = tmp0 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](arg1_1,
arg0_1, arg2_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class CPULayerNormNew(nn.Module):
def __init__(self, features, eps=1e-06):
super().__init__()
self.features = features
self.eps = eps
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| Smerity/pytorch-qrnn | CPULayerNorm | false | 17,928 | ["BSD-3-Clause"] | 4 | 907c8ea53f689136fcc50996b6474de967745202 | https://github.com/Smerity/pytorch-qrnn/tree/907c8ea53f689136fcc50996b6474de967745202 |
MixerBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/bm/cbmvwkhgioz63mnhrh3onxemouh4axyclce6ay7mypmzm62glj7h.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
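# Hedged note (illustrative): this layer norm uses the biased variance
# (correction=0, i.e. divide by n) followed by rsqrt with eps=1e-05, matching
# nn.LayerNorm semantics; a minimal eager-mode restatement of the statistics:
def _layernorm_stats_reference(x, eps=1e-05):
    # hypothetical helper; returns the mean and rsqrt(var + eps) the kernel stores
    mean = x.mean(-1, keepdim=True)
    var = x.var(-1, unbiased=False, keepdim=True)
    return mean, (var + eps).rsqrt()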
# kernel path: runs/run_shard_2/inductor_cache/ot/cotcoijafru4h453dtmpjobeuelenstvljb2jefxzxevlyidmwj2.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x5 = (xindex // 4)
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/46/c46ojkebc2fch6yo3vprq6vn7lqsqvncgo6dfxqszdveo5lhova3.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add, aten.gelu]
# Source node to ATen node mapping:
# x => add_2
# x_1 => add_3, erf, mul_2, mul_3, mul_4
# Graph fragment:
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_5), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_3,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_3), kwargs = {})
triton_poi_fused_add_gelu_2 = async_compile.triton('triton_poi_fused_add_gelu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gelu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_gelu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
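# Hedged note (illustrative): the fused kernel above applies the exact
# erf-based GELU, 0.5 * x * (1 + erf(x / sqrt(2))), to the bias-added
# activations; the constant 0.7071067811865476 is 1 / sqrt(2).
def _gelu_reference(x):
    # hypothetical helper mirroring the fused add+gelu epilogue (minus the bias add)
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))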
# kernel path: runs/run_shard_2/inductor_cache/q4/cq4bbsv3b4qqg4ddtlsryoevmi3ae7xelz7ejgww5irntnnmwxjs.py
# Topologically Sorted Source Nodes: [x_5, y_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_5 => add_4
# y_2 => var_mean_1
# Graph fragment:
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %permute_3), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_4, [3]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((4*x1) + (16*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1) + (16*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x1) + (16*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x1) + (16*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x3), tmp16, xmask)
tl.store(out_ptr1 + (x3), tmp28, xmask)
''', device_str='cuda')
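# NOTE (illustrative, not part of the generated artifact): the kernel above
# unrolls the 4-element last dimension to produce the per-row mean and
# biased variance (correction=0) that LayerNorm needs, computed over the
# residual x + token_out. A minimal eager sketch, ignoring the transpose
# that in_ptr1's strided loads encode:
def _ref_residual_var_mean(x, token_out):
    s = x + token_out
    mean = s.mean(dim=-1, keepdim=True)
    var = s.var(dim=-1, unbiased=False, keepdim=True)  # correction=0, biased
    return mean, var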
# kernel path: runs/run_shard_2/inductor_cache/mf/cmf4kynkd2xh6vd4bbmhtwlqmbyyoz6l3glyovgortkdnnpxuimx.py
# Topologically Sorted Source Nodes: [x_5, y_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_5 => add_4
# y_2 => add_5, add_6, mul_5, mul_6, rsqrt_1, sub_1
# Graph fragment:
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %permute_3), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_5,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %getitem_3), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %primals_8), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x5 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tmp3 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x4), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/lz/clz3fj5jsxq47tfqzi2ci4xlyomnrq3nhenupnvxqpebcyco2f3i.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# x_7 => add_7, erf_1, mul_7, mul_8, mul_9
# Graph fragment:
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 0.5), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 0.7071067811865476), kwargs = {})
# %erf_1 : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_8,), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf_1, 1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %add_7), kwargs = {})
triton_poi_fused_gelu_5 = async_compile.triton('triton_poi_fused_gelu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/pd/cpdw5tzglvujszyvcx6cndwbqsnwrul6dm4w2hnj6fxdg7djvirx.py
# Topologically Sorted Source Nodes: [x_5, x_11], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_11 => add_8
# x_5 => add_4
# Graph fragment:
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %permute_3), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %view_7), kwargs = {})
triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tmp3 = tl.load(in_out_ptr0 + (x4), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
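# NOTE (illustrative, not part of the generated artifact): the kernel above
# folds both residual adds of the block into one pass and reuses the fc2
# matmul output's storage via in_out_ptr0 (read, accumulated, overwritten in
# place). An eager equivalent, ignoring the transpose in in_ptr1's load:
def _ref_double_residual(x, token_out, fc2_out, fc2_bias):
    x_5 = x + token_out                 # x_5 = x + token-mixing branch
    return x_5 + (fc2_out + fc2_bias)   # x_11 = x_5 + channel-mixing branch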
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (4, 16), (16, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (16, ), (1, ))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add, aten.gelu]
triton_poi_fused_add_gelu_2.run(buf3, primals_5, buf4, 1024, grid=grid(1024), stream=stream0)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf5)
del primals_7
buf6 = buf1; del buf1 # reuse
buf7 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_5, y_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_3.run(primals_3, buf5, buf6, buf7, 64, grid=grid(64), stream=stream0)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5, y_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_4.run(primals_3, buf5, buf6, buf7, primals_8, primals_9, buf8, 256, grid=grid(256), stream=stream0)
del buf6
del buf7
del primals_9
buf9 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf8, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_11
buf10 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.gelu]
triton_poi_fused_gelu_5.run(buf9, buf10, 1024, grid=grid(1024), stream=stream0)
buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf10, (64, 16), (16, 1), 0), reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf11)
buf12 = reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [x_5, x_11], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf12, primals_3, buf5, primals_13, 256, grid=grid(256), stream=stream0)
del primals_13
return (buf12, primals_3, primals_5, primals_8, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, reinterpret_tensor(buf4, (64, 16), (16, 1), 0), buf5, reinterpret_tensor(buf8, (64, 4), (4, 1), 0), buf9, reinterpret_tensor(buf10, (64, 16), (16, 1), 0), primals_12, primals_10, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class VanillaMlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class MixerBlock(nn.Module):
def __init__(self, num_patch, dim, token_mlp_ratio, channel_mlp_ratio,
drop=0.0, drop_path=0.0, norm_layer=nn.LayerNorm, act_layer=nn.GELU):
super().__init__()
self.norm1 = norm_layer(dim)
self.norm2 = norm_layer(dim)
token_mlp_dim = round(dim * token_mlp_ratio)
channel_mlp_dim = round(dim * channel_mlp_ratio)
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.token_mix = VanillaMlp(num_patch, token_mlp_dim, num_patch,
act_layer, drop)
self.channel_mix = VanillaMlp(dim, channel_mlp_dim, dim, act_layer,
drop)
def forward(self, x):
y = self.norm1(x).transpose(1, 2)
y = self.drop_path(self.token_mix(y)).transpose(1, 2)
x = x + y
y = self.norm2(x)
x = x + self.drop_path(self.channel_mix(y))
return x
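# NOTE (illustrative, not part of the original source): an eager restatement
# of the forward above for the nn.Identity() drop_path configuration traced
# here. Token mixing runs the MLP along the patch axis (hence the transpose
# pair), channel mixing along the feature axis:
def _ref_mixer_forward(block, x):
    y = block.token_mix(block.norm1(x).transpose(1, 2)).transpose(1, 2)
    x = x + y
    return x + block.channel_mix(block.norm2(x))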
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_patch': 4, 'dim': 4, 'token_mlp_ratio': 4,
'channel_mlp_ratio': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x5 = xindex // 4
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp8, xmask)
@triton.jit
def triton_poi_fused_add_gelu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 * x1 + 16 * x0 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1 + 16 * x0 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x1 + 16 * x0 + 64 * x2), xmask,
eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + 4 * x1 + 16 * x0 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
tl.store(out_ptr1 + x3, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp3 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x4, tmp13, xmask)
@triton.jit
def triton_poi_fused_gelu_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp3 = tl.load(in_out_ptr0 + x4, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x4, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (4, 16), (16, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](primals_3, buf0, buf1,
primals_1, primals_2, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3)
        buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
triton_poi_fused_add_gelu_2[grid(1024)](buf3, primals_5, buf4, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf5)
del primals_7
buf6 = buf1
del buf1
buf7 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_3[grid(64)](primals_3, buf5,
buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(256)](primals_3, buf5,
buf6, buf7, primals_8, primals_9, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf6
del buf7
del primals_9
buf9 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf8, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf9)
del primals_11
        buf10 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
triton_poi_fused_gelu_5[grid(1024)](buf9, buf10, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (64, 16), (16, 1), 0),
reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf11)
buf12 = reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf11
triton_poi_fused_add_6[grid(256)](buf12, primals_3, buf5,
primals_13, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
return buf12, primals_3, primals_5, primals_8, reinterpret_tensor(buf2,
(64, 4), (4, 1), 0), buf3, reinterpret_tensor(buf4, (64, 16), (16,
1), 0), buf5, reinterpret_tensor(buf8, (64, 4), (4, 1), 0
), buf9, reinterpret_tensor(buf10, (64, 16), (16, 1), 0
), primals_12, primals_10, primals_6, primals_4
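# NOTE (illustrative): besides buf12 (the block output), call() returns the
# inputs and intermediate buffers that are saved for the backward pass; this
# is only the forward partition of the AOT-compiled graph, so the gradient
# kernels that consume these tensors live in a separately compiled module.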
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class VanillaMlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class MixerBlockNew(nn.Module):
def __init__(self, num_patch, dim, token_mlp_ratio, channel_mlp_ratio,
drop=0.0, drop_path=0.0, norm_layer=nn.LayerNorm, act_layer=nn.GELU):
super().__init__()
self.norm1 = norm_layer(dim)
self.norm2 = norm_layer(dim)
token_mlp_dim = round(dim * token_mlp_ratio)
channel_mlp_dim = round(dim * channel_mlp_ratio)
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.token_mix = VanillaMlp(num_patch, token_mlp_dim, num_patch,
act_layer, drop)
self.channel_mix = VanillaMlp(dim, channel_mlp_dim, dim, act_layer,
drop)
def forward(self, input_0):
primals_1 = self.norm1.weight
primals_2 = self.norm1.bias
primals_7 = self.norm2.weight
primals_8 = self.norm2.bias
primals_4 = self.token_mix.fc1.weight
primals_5 = self.token_mix.fc1.bias
primals_6 = self.token_mix.fc2.weight
primals_9 = self.token_mix.fc2.bias
primals_10 = self.channel_mix.fc1.weight
primals_11 = self.channel_mix.fc1.bias
primals_12 = self.channel_mix.fc2.weight
primals_13 = self.channel_mix.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
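# NOTE (illustrative usage, assuming a CUDA device is available): the
# wrapper runs the compiled kernels with the module's own parameters, e.g.
#   m = MixerBlockNew(num_patch=4, dim=4, token_mlp_ratio=4,
#                     channel_mlp_ratio=4).cuda()
#   out = m(torch.rand(4, 4, 4, 4, device='cuda'))
# which should match the eager MixerBlock for this drop_path == 0 setup.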
| Sense-GVT/BigPretrain | MixerBlock | false | 17,929 | ["Apache-2.0"] | 8 | d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e | https://github.com/Sense-GVT/BigPretrain/tree/d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e
SelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/hx/chxnd7dab4aswk7u325dpvdry5kyc53ubpnanqkvekfo4ipxu2sd.py
# Topologically Sorted Source Nodes: [truediv, energy_1], Original ATen: [aten.div, aten._softmax]
# Source node to ATen node mapping:
# energy_1 => amax, clone, exp, sub, sum_1
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_9, 0.5), kwargs = {})
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%div,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_div_0 = async_compile.triton('triton_poi_fused__softmax_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp6 = tmp0 * tmp5
tmp7 = tmp6 * tmp3
tmp8 = triton_helpers.maximum(tmp4, tmp7)
tmp10 = tmp0 * tmp9
tmp11 = tmp10 * tmp3
tmp12 = triton_helpers.maximum(tmp8, tmp11)
tmp14 = tmp0 * tmp13
tmp15 = tmp14 * tmp3
tmp16 = triton_helpers.maximum(tmp12, tmp15)
tmp17 = tmp4 - tmp16
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp7 - tmp16
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tmp11 - tmp16
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp15 - tmp16
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tl.store(out_ptr0 + (x3), tmp16, xmask)
tl.store(out_ptr1 + (x3), tmp27, xmask)
''', device_str='cuda')
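# NOTE (illustrative, not part of the generated artifact): the kernel above
# is the reduction half of a numerically stable softmax: one program per row
# unrolls the 4-wide key axis, computes the row max (amax) and the sum of
# exp(score - max); the follow-up kernel then normalizes. A minimal eager
# sketch of the same two-pass scheme:
def _ref_stable_softmax(scores):
    m = scores.max(dim=-1, keepdim=True).values  # pass 1: row max
    e = torch.exp(scores - m)                    # shift before exponentiating
    return e / e.sum(dim=-1, keepdim=True)       # pass 2: normalize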
# kernel path: runs/run_shard_2/inductor_cache/iu/ciueecyadnmplad54kixsceiozfd66cy4xtpprbzgdbkamk26l5u.py
# Topologically Sorted Source Nodes: [truediv, energy_1], Original ATen: [aten.div, aten._softmax]
# Source node to ATen node mapping:
# energy_1 => amax, clone, div_1, exp, sub, sum_1
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_9, 0.5), kwargs = {})
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%div,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_div_1 = async_compile.triton('triton_poi_fused__softmax_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = (xindex // 4)
x0 = xindex % 4
x1 = (xindex // 4) % 4
x3 = (xindex // 64)
x2 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/54/c54khy3kuyof4sdv2ql6opiswaujxwjln6ov32klfsevrkag5vzt.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, YBLOCK])
tmp3 = tmp0 + tmp2
tl.store(out_ptr0 + (x2 + (4*y3)), tmp3, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/qd/cqd6znasiebptydynbfaeh5uwzji33pahjlhp6tr4nshmxhvbx5l.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out_2 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_15, %view_14], -1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x3 = (xindex // 8)
x1 = (xindex // 8) % 4
x2 = (xindex // 32)
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x1 + (4*((-4) + x0)) + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x4), tmp10, xmask)
''', device_str='cuda')
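# NOTE (illustrative, not part of the generated artifact): this kernel is
# the lowering of torch.cat([query, out], dim=-1): for each 8-wide output
# position, columns 0-3 come from a masked load of query (in_ptr0) and
# columns 4-7 from a masked load of out (in_ptr1) at a shifted index, fused
# branchlessly with tl.where instead of two separate copies.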
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_4, (1, 1), (1, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (1, 1), (1, 1))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (1, 1), (1, 1))
assert_size_stride(primals_9, (1, ), (1, ))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_11, (4, 8), (8, 1))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [query_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 1), (1, 1), 0), primals_4, alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [key_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_2, (64, 1), (1, 1), 0), primals_6, alpha=1, beta=1, out=buf3)
del primals_6
del primals_7
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 1), (1, 1), 0), primals_8, out=buf4)
del primals_8
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
# Topologically Sorted Source Nodes: [truediv, energy_1], Original ATen: [aten.div, aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_div_0.run(buf1, buf3, buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv, energy_1], Original ATen: [aten.div, aten._softmax]
triton_poi_fused__softmax_div_1.run(buf1, buf3, buf5, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf5
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf4, primals_9, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_9
buf9 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf1, buf9, buf10, 128, grid=grid(128), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, reinterpret_tensor(buf10, (16, 8), (8, 1), 0), reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf11)
del primals_12
return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 1), (1, 1), 0), buf1, reinterpret_tensor(primals_2, (64, 1), (1, 1), 0), buf3, reinterpret_tensor(primals_3, (64, 1), (1, 1), 0), buf7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0), primals_11, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SelfAttention(nn.Module):
def __init__(self, embed_dims, heads):
super(SelfAttention, self).__init__()
self.heads = heads
self.embed_dims = embed_dims
self.depth = embed_dims // heads
self.query = nn.Linear(self.depth, self.depth)
self.key = nn.Linear(self.depth, self.depth)
self.value = nn.Linear(self.depth, self.depth)
self.fc_out = nn.Linear(self.depth * self.heads * 2, self.embed_dims)
def forward(self, query, key, value, mask):
        batch, q_len, k_len, v_len = query.shape[0], query.shape[1], key.shape[1], value.shape[1]
query = query.reshape(batch, q_len, self.heads, self.depth)
key = key.reshape(batch, k_len, self.heads, self.depth)
value = value.reshape(batch, v_len, self.heads, self.depth)
query = self.query(query)
key = self.key(key)
value = self.value(value)
energy = torch.einsum('bqhd, bkhd -> bhqk', [query, key])
if mask is not None:
energy.masked_fill(mask == 0, float('-1e20'))
energy = torch.softmax(energy / (self.depth ** 1 / 2), dim=-1)
out = torch.einsum('bhqv, bvhd -> bqhd', [energy, value])
out = out.reshape(batch, q_len, self.heads * self.depth)
query = query.reshape(batch, q_len, self.heads * self.depth)
out = torch.cat([query, out], dim=-1)
out = self.fc_out(out)
return out
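# NOTE (illustrative, not part of the original source): two quirks in the
# forward above, both faithfully reflected in the compiled kernels:
#   * energy.masked_fill(...) is the out-of-place variant and its result is
#     discarded, so the mask never affects the attention scores (no masking
#     op appears anywhere in the generated Triton code).
#   * self.depth ** 1 / 2 parses as (depth ** 1) / 2, not sqrt(depth); with
#     depth = embed_dims // heads = 1 here the divisor is 0.5, which is why
#     triton_poi_fused__softmax_div_0 multiplies the scores by 2.0.
# A conventional masked, sqrt-scaled variant would instead read:
#   energy = energy.masked_fill(mask == 0, float('-1e20'))
#   energy = torch.softmax(energy / (self.depth ** 0.5), dim=-1)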
def get_inputs():
    return [torch.rand([4, 4, 4, 1]), torch.rand([4, 4, 4, 1]), torch.rand([4, 4, 4, 1]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dims': 4, 'heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_div_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr1 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr1 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp6 = tmp0 * tmp5
tmp7 = tmp6 * tmp3
tmp8 = triton_helpers.maximum(tmp4, tmp7)
tmp10 = tmp0 * tmp9
tmp11 = tmp10 * tmp3
tmp12 = triton_helpers.maximum(tmp8, tmp11)
tmp14 = tmp0 * tmp13
tmp15 = tmp14 * tmp3
tmp16 = triton_helpers.maximum(tmp12, tmp15)
tmp17 = tmp4 - tmp16
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp7 - tmp16
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tmp11 - tmp16
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp15 - tmp16
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tl.store(out_ptr0 + x3, tmp16, xmask)
tl.store(out_ptr1 + x3, tmp27, xmask)
@triton.jit
def triton_poi_fused__softmax_div_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 64
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 4 * x0 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, YBLOCK])
tmp3 = tmp0 + tmp2
tl.store(out_ptr0 + (x2 + 4 * y3), tmp3, xmask & ymask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x3 = xindex // 8
x1 = xindex // 8 % 4
x2 = xindex // 32
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x1 + 4 * (-4 + x0) + 16 * x2), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
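# Hedged eager equivalent of the cat kernel above (helper name is ours): the
# first 4 lanes of the last axis come from the projected query, the last 4
# from the attention output.
def _cat_reference(projected_query, attn_out):
    import torch
    return torch.cat([projected_query, attn_out], dim=-1)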
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_4, (1, 1), (1, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (1, 1), (1, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (1, 1), (1, 1))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_11, (4, 8), (8, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64,
1), (1, 1), 0), primals_4, alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_2, (64,
1), (1, 1), 0), primals_6, alpha=1, beta=1, out=buf3)
del primals_6
del primals_7
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 1), (1, 1), 0),
primals_8, out=buf4)
del primals_8
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_div_0[grid(64)](buf1, buf3, buf5, buf6,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_div_1[grid(256)](buf1, buf3, buf5, buf6,
buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf5
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0)
del buf6
triton_poi_fused_clone_2[grid(16, 4)](buf4, primals_9, buf8, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_9
buf9 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0)
del buf4
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_3[grid(128)](buf1, buf9, buf10, 128, XBLOCK=
128, num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_12, reinterpret_tensor(buf10, (16, 8),
(8, 1), 0), reinterpret_tensor(primals_11, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf11)
del primals_12
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 1), (1, 1), 0
), buf1, reinterpret_tensor(primals_2, (64, 1), (1, 1), 0
), buf3, reinterpret_tensor(primals_3, (64, 1), (1, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0
), primals_11, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0)
class SelfAttentionNew(nn.Module):
def __init__(self, embed_dims, heads):
super(SelfAttentionNew, self).__init__()
self.heads = heads
self.embed_dims = embed_dims
self.depth = embed_dims // heads
self.query = nn.Linear(self.depth, self.depth)
self.key = nn.Linear(self.depth, self.depth)
self.value = nn.Linear(self.depth, self.depth)
self.fc_out = nn.Linear(self.depth * self.heads * 2, self.embed_dims)
def forward(self, input_0, input_1, input_2, input_3):
primals_4 = self.query.weight
primals_5 = self.query.bias
primals_6 = self.key.weight
primals_7 = self.key.bias
primals_8 = self.value.weight
primals_9 = self.value.bias
primals_11 = self.fc_out.weight
primals_12 = self.fc_out.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
primals_10 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
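# Hedged usage sketch (names ours, not from the original source): the compiled
# wrapper requires CUDA tensors matching the shapes/strides asserted in call().
def _selfattention_new_smoke_test():
    import torch
    model = SelfAttentionNew(embed_dims=4, heads=4).cuda()
    q = torch.rand(4, 4, 4, 1, device='cuda')
    k = torch.rand(4, 4, 4, 1, device='cuda')
    v = torch.rand(4, 4, 4, 1, device='cuda')
    mask = torch.rand(4, 4, 4, 4, device='cuda')
    return model(q, k, v, mask)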
| ShivamRajSharma/Transformer-Text-To-Spech | SelfAttention | false | 17,930 | ["MIT"] | 10 | 2e1cf84a791497e414fb72ae04d954fce934a32a | https://github.com/ShivamRajSharma/Transformer-Text-To-Spech/tree/2e1cf84a791497e414fb72ae04d954fce934a32a |
ContrastiveLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/v2/cv2ohk4vpat6jimblbxldlcmj6xwbdtpffu3zhdixmnvm47unics.py
# Topologically Sorted Source Nodes: [sub, pow_1, distances], Original ATen: [aten.sub, aten.pow, aten.sum]
# Source node to ATen node mapping:
# distances => sum_1
# pow_1 => pow_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg2_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
triton_poi_fused_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/zr/czr2vrtmp2zzmoldsxojgyuiyfeueaymvvtixpuppz45iypjtjb3.py
# Topologically Sorted Source Nodes: [mul, mul_1, add, add_1, sqrt, sub_1, relu, pow_2, mul_2, add_2, losses, mean], Original ATen: [aten.mul, aten.add, aten.sqrt, aten.rsub, aten.relu, aten.pow, aten.mean]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# losses => mul_3
# mean => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_2 => pow_2
# relu => relu
# sqrt => sqrt
# sub_1 => sub_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sum_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, -1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-09), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (4, %sqrt), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu, 2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %pow_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.5), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_3,), kwargs = {})
triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1 = async_compile.triton('triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp1 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = -1.0
tmp4 = tmp0 * tmp3
tmp5 = 1.0
tmp6 = tmp4 + tmp5
tmp7 = 1e-09
tmp8 = tmp1 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = 4.0
tmp11 = tmp10 - tmp9
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tmp13 * tmp13
tmp15 = tmp6 * tmp14
tmp16 = tmp2 + tmp15
tmp17 = 0.5
tmp18 = tmp16 * tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, pow_1, distances], Original ATen: [aten.sub, aten.pow, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_pow_sub_sum_0.run(arg1_1, arg2_1, buf0, 64, grid=grid(64), stream=stream0)
del arg1_1
del arg2_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mul, mul_1, add, add_1, sqrt, sub_1, relu, pow_2, mul_2, add_2, losses, mean], Original ATen: [aten.mul, aten.add, aten.sqrt, aten.rsub, aten.relu, aten.pow, aten.mean]
triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1.run(buf2, arg0_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class ContrastiveLoss(nn.Module):
"""
    Contrastive loss.
    Takes embeddings of two samples and a target label: 1 if the samples are
    from the same class, 0 otherwise.
    output1/output2: embedding tensors of shape (n, 2)
"""
def __init__(self, margin):
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.eps = 1e-09
def forward(self, output1, output2, target, size_average=True):
distances = (output2 - output1).pow(2).sum(1)
losses = 0.5 * (target * distances + (1 + -1 * target) * F.relu(
self.margin - (distances + self.eps).sqrt()).pow(2))
return losses.mean() if size_average else losses.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
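# Hedged worked example (helper name ours): per pair the loss is
# 0.5 * (y * d + (1 - y) * relu(margin - sqrt(d + eps)) ** 2), where d is the
# squared distance summed over dim 1.
def _contrastive_smoke_test():
    loss_fn = ContrastiveLoss(margin=4)
    output1, output2, target = get_inputs()
    return loss_fn(output1, output2, target)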
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
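# Hedged eager equivalent of the kernel above (helper name ours): the fused
# sub/pow/sum is simply a squared L2 distance reduced over dim 1.
def _distance_reference(a, b):
    return (a - b).pow(2).sum(dim=1)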
@triton.jit
def triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = -1.0
tmp4 = tmp0 * tmp3
tmp5 = 1.0
tmp6 = tmp4 + tmp5
tmp7 = 1e-09
tmp8 = tmp1 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = 4.0
tmp11 = tmp10 - tmp9
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tmp13 * tmp13
tmp15 = tmp6 * tmp14
tmp16 = tmp2 + tmp15
tmp17 = 0.5
tmp18 = tmp16 * tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
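# Hedged eager equivalent of the persistent reduction above (helper name
# ours); margin 4.0 and eps 1e-09 are baked into the kernel constants.
def _loss_reference(target, distances):
    import torch
    hinge = torch.relu(4.0 - (distances + 1e-09).sqrt())
    return (0.5 * (target * distances + (1 - target) * hinge.pow(2))).mean()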
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_sub_sum_0[grid(64)](arg1_1, arg2_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg1_1
del arg2_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1[grid(1)](buf2,
arg0_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf2,
class ContrastiveLossNew(nn.Module):
"""
    Contrastive loss.
    Takes embeddings of two samples and a target label: 1 if the samples are
    from the same class, 0 otherwise.
    output1/output2: embedding tensors of shape (n, 2)
"""
def __init__(self, margin):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
self.eps = 1e-09
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
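# Hedged usage sketch (names ours): the compiled loss expects three CUDA
# tensors of shape (4, 4, 4, 4), as asserted in call().
def _contrastive_new_smoke_test():
    import torch
    loss_fn = ContrastiveLossNew(margin=4)
    ts = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3)]
    return loss_fn(*ts)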
| Sigma10010/nuclei_cells_det | ContrastiveLoss | false | 17,931 | ["MIT"] | 4 | c074175fec8938472bb4cddabd83d1d0ea78f230 | https://github.com/Sigma10010/nuclei_cells_det/tree/c074175fec8938472bb4cddabd83d1d0ea78f230 |
AttentionLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/gm/cgmu3hqtju4n2ybutawn42cbtgivpwminncmzeooab62ilot5bir.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused__weight_norm_interface_0 = async_compile.triton('triton_poi_fused__weight_norm_interface_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/g7/cg7ft2rl6icxu4ysufidys6qejwq3jtdlwig7h3gbbkptycrwfrb.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => div, mul
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {})
triton_poi_fused__weight_norm_interface_1 = async_compile.triton('triton_poi_fused__weight_norm_interface_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/cu/ccu4gtegvzqef2zkf3dnc3bnf2cbwr56j6w47xcbkohcbbf75dyl.py
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
# add => add
# x => mul_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7071067811865476), kwargs = {})
triton_poi_fused_add_mul_2 = async_compile.triton('triton_poi_fused_add_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp4 * tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/kj/ckjie4jfhwucdjwrkoxyorl5ltzj7tre6m3jengzvslyxc6fnmbt.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_2 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/bt/cbt34fhodpavx2oq24hpku3ag5gk3mibbr2ozkxz4z7bs2m23gfy.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_2 => div_1, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ir/cir7ahxfw6lokvigrd2opvbk747rftuxfecv2bm444pm52to3xuq.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# x_5 => mul_2
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm_1, 2.0), kwargs = {})
triton_poi_fused_mul_5 = async_compile.triton('triton_poi_fused_mul_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_5(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 1), (1, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
stream0 = get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0.run(primals_3, buf0, 4, grid=grid(4), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_3, primals_2, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_2.run(buf3, primals_4, primals_5, 64, grid=grid(64), stream=stream0)
del primals_4
del primals_5
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, x, x_1], Original ATen: [aten.add, aten.mul, aten.bmm]
extern_kernels.bmm(buf3, primals_6, out=buf4)
buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_6, (4, 4, 4), (16, 1, 4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_8, buf8, 4, grid=grid(4), stream=stream0)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_8, primals_7, buf8, buf9, 16, grid=grid(16), stream=stream0)
buf10 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.mul]
triton_poi_fused_mul_5.run(buf10, 64, grid=grid(64), stream=stream0)
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0), out=buf11)
buf12 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [add_1, x_6], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_2.run(buf12, primals_9, primals_1, 64, grid=grid(64), stream=stream0)
del primals_9
return (buf12, reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), buf1, buf9, primals_2, primals_3, primals_7, primals_8, buf0, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf6, reinterpret_tensor(primals_6, (4, 4, 4), (16, 1, 4), 0), buf8, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
from torch.nn import functional as F
import torch.nn.init
def Linear(in_features, out_features, dropout=0.0):
m = nn.Linear(in_features, out_features)
m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
m.bias.data.zero_()
return nn.utils.weight_norm(m)
class AttentionLayer(nn.Module):
def __init__(self, conv_channels, embed_dim):
super(AttentionLayer, self).__init__()
self.in_projection = Linear(conv_channels, embed_dim)
self.out_projection = Linear(embed_dim, conv_channels)
self.bmm = torch.bmm
def forward(self, x, wordemb, imgsfeats):
residual = x
x = (self.in_projection(x) + wordemb) * math.sqrt(0.5)
b, c, n = imgsfeats.size()
y = imgsfeats.view(b, c, n)
x = self.bmm(x, y)
sz = x.size()
        # dim made explicit: the legacy implicit-dim softmax resolves to dim 1
        # for this 2-D view, matching the compiled kernels.
        x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1)
x = x.view(sz)
attn_scores = x
y = y.permute(0, 2, 1)
x = self.bmm(x, y)
s = y.size(1)
        x = x * (s * math.sqrt(1.0 / s))  # == x * sqrt(s); 2.0 when s == 4
x = (self.out_projection(x) + residual) * math.sqrt(0.5)
return x, attn_scores
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'conv_channels': 4, 'embed_dim': 4}]
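# Hedged usage sketch (helper name ours): one eager forward pass with the toy
# shapes above; returns the attended features and the attention map.
def _attention_layer_smoke_test():
    layer = AttentionLayer(conv_channels=4, embed_dim=4)
    out, attn_scores = layer(*get_inputs())
    assert out.shape == (4, 4, 4) and attn_scores.shape == (4, 4, 4)
    return out, attn_scores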
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
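# Hedged eager equivalent of the two _weight_norm kernels above (helper name
# ours): rebuild the effective weight from weight_v / weight_g the way
# nn.utils.weight_norm does for its default dim=0.
def _weight_norm_reference(weight_v, weight_g):
    norm = weight_v.pow(2).sum(dim=1, keepdim=True).sqrt()
    return weight_v * (weight_g / norm)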
@triton.jit
def triton_poi_fused_add_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp4 * tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_5(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
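# Hedged note as code (helper name ours): the mul kernel above folds the eager
# expression x * (s * sqrt(1/s)) into one constant, sqrt(s) == 2.0 for s == 4.
def _scale_reference(x, s=4):
    import math
    return x * (s * math.sqrt(1.0 / s))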
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 1), (1, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0[grid(4)](primals_3, buf0,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(16)](primals_3,
primals_2, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
del buf2
triton_poi_fused_add_mul_2[grid(64)](buf3, primals_4, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
del primals_5
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, primals_6, out=buf4)
buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0)
del buf3
triton_poi_fused__softmax_3[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0)
del buf4
triton_poi_fused__softmax_4[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_6, (4, 4, 4), (16, 1, 4), 0),
out=buf7)
buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(4)](primals_8, buf8,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(16)](primals_8,
primals_7, buf8, buf9, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf10 = buf7
del buf7
triton_poi_fused_mul_5[grid(64)](buf10, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0),
reinterpret_tensor(buf9, (4, 4), (1, 4), 0), out=buf11)
buf12 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0)
del buf11
triton_poi_fused_add_mul_2[grid(64)](buf12, primals_9, primals_1,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
return (buf12, reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), buf1,
buf9, primals_2, primals_3, primals_7, primals_8, buf0,
reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf6,
reinterpret_tensor(primals_6, (4, 4, 4), (16, 1, 4), 0), buf8,
reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf9)
def Linear(in_features, out_features, dropout=0.0):
m = nn.Linear(in_features, out_features)
m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
m.bias.data.zero_()
return nn.utils.weight_norm(m)
class AttentionLayerNew(nn.Module):
def __init__(self, conv_channels, embed_dim):
super(AttentionLayerNew, self).__init__()
self.in_projection = Linear(conv_channels, embed_dim)
self.out_projection = Linear(embed_dim, conv_channels)
self.bmm = torch.bmm
def forward(self, input_0, input_1, input_2):
primals_4 = self.in_projection.bias
primals_2 = self.in_projection.weight_g
primals_3 = self.in_projection.weight_v
primals_9 = self.out_projection.bias
primals_7 = self.out_projection.weight_g
primals_8 = self.out_projection.weight_v
primals_1 = input_0
primals_5 = input_1
primals_6 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
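# Hedged usage sketch (names ours): the compiled layer expects three CUDA
# tensors of shape (4, 4, 4), matching the guards in call().
def _attention_layer_new_smoke_test():
    import torch
    layer = AttentionLayerNew(conv_channels=4, embed_dim=4).cuda()
    x, wordemb, imgsfeats = (torch.rand(4, 4, 4, device='cuda') for _ in
        range(3))
    return layer(x, wordemb, imgsfeats)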
| Shiyang-Yan/Discrete-continous-PG-for-Retrieval | AttentionLayer | false | 17,932 | ["Apache-2.0"] | 8 | 39fd7a81f732ae043c2ea20352a0c55b72834639 | https://github.com/Shiyang-Yan/Discrete-continous-PG-for-Retrieval/tree/39fd7a81f732ae043c2ea20352a0c55b72834639 |
wide_basic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/kr/ckrtwovkhwxxs4z33vkmsczo5gcqzvnvdb3zcpdhaxniciqjmysf.py
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# leaky_relu => gt, mul, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %primals_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vp/cvpiu32ud5hot6gudwwpt47wc2hc56wzti7olzripo2g3thb35ry.py
# Topologically Sorted Source Nodes: [conv2d, leaky_relu_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# leaky_relu_1 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/wu/cwu4aunzr3ywj24slclusvueucyumpdk3xwdnttwgdrxicosuhz6.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out_1 => convolution_1
# out_2 => add
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, leaky_relu_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf1, primals_3, buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf1
del primals_3
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_2.run(buf5, primals_5, primals_1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_5
return (buf5, primals_2, primals_4, buf0, buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils
import torch.utils.data
def get_norm(n_filters, norm):
if norm is None:
return Identity()
elif norm == 'batch':
return nn.BatchNorm2d(n_filters, momentum=0.9)
elif norm == 'instance':
return nn.InstanceNorm2d(n_filters, affine=True)
elif norm == 'layer':
return nn.GroupNorm(1, n_filters)
    elif norm == 'act':
        # NOTE: assumes an external `norms` module providing ActNorm; that
        # module is not imported in this snippet.
        return norms.ActNorm(n_filters, False)
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x):
return x
class wide_basic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1, norm=None,
leak=0.2):
super(wide_basic, self).__init__()
self.lrelu = nn.LeakyReLU(leak)
self.bn1 = get_norm(in_planes, norm)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1,
bias=True)
self.dropout = Identity() if dropout_rate == 0.0 else nn.Dropout(p=
dropout_rate)
self.bn2 = get_norm(planes, norm)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes,
kernel_size=1, stride=stride, bias=True))
def forward(self, x):
out = self.dropout(self.conv1(self.lrelu(self.bn1(x))))
out = self.conv2(self.lrelu(self.bn2(out)))
out += self.shortcut(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'planes': 4, 'dropout_rate': 0.5}]
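# Smoke test below is an illustrative sketch (not part of the captured repo
# code). With dropout_rate=0.0 and norm=None the dropout and norm layers are
# Identity, and since stride=1 and in_planes == planes the shortcut is empty,
# so the block computes x + conv2(lrelu(conv1(lrelu(x)))).
def _smoke_test_wide_basic():
    torch.manual_seed(0)
    block = wide_basic(in_planes=4, planes=4, dropout_rate=0.0)
    block.eval()
    x = torch.rand(4, 4, 4, 4)
    out = block(x)
    assert out.shape == x.shape
    return out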
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
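    # Bias add fused with LeakyReLU(0.2). The boolean mask tmp4 is written
    # to out_ptr0 as well, since the traced graph keeps it (num_users=2)
    # for reuse by the compiled backward pass.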
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
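    # Fused epilogue: add the per-channel conv2 bias (tmp1) and the residual
    # input x (tmp3) in place over the convolution output (in_out_ptr0).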
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](primals_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1,
primals_3, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_2[grid(256)](buf5, primals_5,
primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_5
return buf5, primals_2, primals_4, buf0, buf2, buf3
def get_norm(n_filters, norm):
if norm is None:
return Identity()
elif norm == 'batch':
return nn.BatchNorm2d(n_filters, momentum=0.9)
elif norm == 'instance':
return nn.InstanceNorm2d(n_filters, affine=True)
elif norm == 'layer':
return nn.GroupNorm(1, n_filters)
    elif norm == 'act':
        # NOTE: assumes an external `norms` module providing ActNorm; that
        # module is not imported in this snippet.
        return norms.ActNorm(n_filters, False)
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x):
return x
class wide_basicNew(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1, norm=None,
leak=0.2):
super(wide_basicNew, self).__init__()
self.lrelu = nn.LeakyReLU(leak)
self.bn1 = get_norm(in_planes, norm)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1,
bias=True)
self.dropout = Identity() if dropout_rate == 0.0 else nn.Dropout(p=
dropout_rate)
self.bn2 = get_norm(planes, norm)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes,
kernel_size=1, stride=stride, bias=True))
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Silent-Zebra/JEM | wide_basic | false | 17,933 | [
"Apache-2.0"
] | 6 | 33440aff8429d9a24a8ba858d0209f4b48be8e05 | https://github.com/Silent-Zebra/JEM/tree/33440aff8429d9a24a8ba858d0209f4b48be8e05 |
CPUReverseForgetMult | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/gl/cgl224eaxxzsmya7fdujrol5zlwofh5ljmx7qnbod5lcpgporen5.py
# Topologically Sorted Source Nodes: [sub_2, sub_1, sub, mul_1, h_2, mul_2, h_4, mul_3, h_6, cat], Original ATen: [aten.rsub, aten.mul, aten.add, aten.cat]
# Source node to ATen node mapping:
# cat => cat
# h_2 => add
# h_4 => add_1
# h_6 => add_2
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %squeeze), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_1, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %add), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_2, %mul_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %add_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_3, %mul_3), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_2, %add_1, %add, %squeeze],), kwargs = {})
triton_poi_fused_add_cat_mul_rsub_0 = async_compile.triton('triton_poi_fused_add_cat_mul_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_mul_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cat_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (64 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (128 + x2), xmask)
tmp8 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (192 + x2), xmask)
tmp16 = tl.load(in_ptr1 + (x2), xmask)
tmp18 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = 1.0
tmp5 = tmp4 - tmp3
tmp7 = tmp0 * tmp6
tmp9 = tmp4 - tmp8
tmp11 = tmp0 * tmp10
tmp12 = tmp9 * tmp11
tmp13 = tmp7 + tmp12
tmp14 = tmp5 * tmp13
tmp15 = tmp2 + tmp14
tmp17 = tmp0 * tmp16
tmp19 = tmp4 - tmp18
tmp20 = tmp19 * tmp15
tmp21 = tmp17 + tmp20
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp13, xmask)
tl.store(out_ptr2 + (x2), tmp21, xmask)
tl.store(out_ptr3 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
buf0 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 64) # alias
buf2 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 128) # alias
buf1 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0) # alias
buf3 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 192) # alias
# Topologically Sorted Source Nodes: [sub_2, sub_1, sub, mul_1, h_2, mul_2, h_4, mul_3, h_6, cat], Original ATen: [aten.rsub, aten.mul, aten.add, aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_add_cat_mul_rsub_0.run(arg0_1, arg1_1, buf0, buf2, buf1, buf3, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class CPUReverseForgetMult(torch.nn.Module):
    """Reverse forget mult: h_t = f_t * x_t + (1 - f_t) * h_{t+1}, scanned
    from the last timestep back to the first (eager CPU reference)."""
def __init__(self):
super(CPUReverseForgetMult, self).__init__()
def forward(self, f, x, hidden_init=None):
result = []
forgets = f.split(1, dim=0)[::-1]
inputs = (f * x).split(1, dim=0)[::-1]
prev_h = hidden_init
for i, h in enumerate(inputs):
h = h.squeeze()
if prev_h is not None:
h = h + (1 - forgets[i]) * prev_h
result.append(h)
prev_h = h
result = result[::-1]
return torch.cat(result, dim=0)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
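# Reference loop below is an illustrative sketch (not part of the captured
# repo code), assuming f and x share one (seq, batch, hidden) shape; the
# module above instead broadcasts f against x and concatenates squeezed
# slices. Both realize h_t = f_t * x_t + (1 - f_t) * h_{t+1} with
# h_T = hidden_init.
def reverse_forget_mult_reference(f, x, hidden_init=None):
    h = hidden_init
    outputs = []
    for t in range(f.size(0) - 1, -1, -1):
        h_t = f[t] * x[t]
        if h is not None:
            h_t = h_t + (1 - f[t]) * h
        outputs.append(h_t)
        h = h_t
    return torch.stack(outputs[::-1], dim=0)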
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_cat_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + (64 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (128 + x2), xmask)
tmp8 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (192 + x2), xmask)
tmp16 = tl.load(in_ptr1 + x2, xmask)
tmp18 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
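    # The 4-step reverse scan is fully unrolled here: in_ptr0 holds f and
    # in_ptr1 holds x (stride 64 per timestep). tmp11 = h_3 = f * x_3, and
    # each earlier step adds (1 - f_t) * h_{t+1}: tmp13 = h_2, tmp15 = h_1,
    # tmp21 = h_0. The stores below scatter h_1, h_2, h_0, h_3 into disjoint
    # views of the single output buffer assembled in call().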
tmp2 = tmp0 * tmp1
tmp4 = 1.0
tmp5 = tmp4 - tmp3
tmp7 = tmp0 * tmp6
tmp9 = tmp4 - tmp8
tmp11 = tmp0 * tmp10
tmp12 = tmp9 * tmp11
tmp13 = tmp7 + tmp12
tmp14 = tmp5 * tmp13
tmp15 = tmp2 + tmp14
tmp17 = tmp0 * tmp16
tmp19 = tmp4 - tmp18
tmp20 = tmp19 * tmp15
tmp21 = tmp17 + tmp20
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp13, xmask)
tl.store(out_ptr2 + x2, tmp21, xmask)
tl.store(out_ptr3 + x2, tmp11, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
buf0 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 64)
buf2 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 128)
buf1 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
buf3 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 192)
get_raw_stream(0)
triton_poi_fused_add_cat_mul_rsub_0[grid(64)](arg0_1, arg1_1, buf0,
buf2, buf1, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf4,
class CPUReverseForgetMultNew(torch.nn.Module):
def __init__(self):
super(CPUReverseForgetMultNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Smerity/pytorch-qrnn | CPUReverseForgetMult | false | 17,934 | [
"BSD-3-Clause"
] | 4 | 907c8ea53f689136fcc50996b6474de967745202 | https://github.com/Smerity/pytorch-qrnn/tree/907c8ea53f689136fcc50996b6474de967745202 |
Project3D | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/kr/ckrqlzua65nhcln4midrf2pl3tvnjymm2dp3k72pz4ewuznk4gxv.py
# Topologically Sorted Source Nodes: [cam_points], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# cam_points => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 48
x1 = (xindex // 48)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/u4/cu4g75wjxdgmpiaoh52q4hr2kvbrsv4sgbnhvr2co3j5nfxzbi3u.py
# Topologically Sorted Source Nodes: [sub, pix_coords_3], Original ATen: [aten.sub, aten.mul]
# Source node to ATen node mapping:
# pix_coords_3 => mul
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute_16, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 2), kwargs = {})
triton_poi_fused_mul_sub_1 = async_compile.triton('triton_poi_fused_mul_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex % 32
x4 = xindex
tmp7 = tl.load(in_ptr0 + (x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (16 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (x3 + (48*x2)), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tmp4 == tmp4
tmp9 = 1e-07
tmp10 = tmp8 + tmp9
tmp11 = tmp7 / tmp10
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = tl.where(tmp6, tmp13, tmp11)
tmp16 = tmp15 / tmp10
tmp17 = tl.where(tmp5, tmp13, tmp16)
tmp18 = tl.where(tmp5, tmp14, tmp17)
tmp19 = tmp18 * tmp12
tmp20 = tl.where(tmp3, tmp19, tmp18)
tmp21 = tmp0 == tmp4
tmp23 = tmp22 / tmp10
tmp24 = tl.where(tmp21, tmp13, tmp23)
tmp25 = tl.where(tmp21, tmp14, tmp24)
tmp26 = tl.where(tmp2, tmp19, tmp25)
tmp27 = tl.where(tmp2, tmp20, tmp26)
tmp28 = 0.5
tmp29 = tmp27 - tmp28
tmp30 = 2.0
tmp31 = tmp29 * tmp30
tl.store(out_ptr0 + (x4), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cam_points], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, buf1, 192, grid=grid(192), stream=stream0)
del buf0
buf2 = empty_strided_cuda((12, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cam_points], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (12, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (12, 4, 4), (16, 4, 1), 0), out=buf2)
del arg2_1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [sub, pix_coords_3], Original ATen: [aten.sub, aten.mul]
triton_poi_fused_mul_sub_1.run(buf2, buf3, 128, grid=grid(128), stream=stream0)
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Project3D(nn.Module):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
def __init__(self, batch_size, height, width, eps=1e-07):
super(Project3D, self).__init__()
self.batch_size = batch_size
self.height = height
self.width = width
self.eps = eps
def forward(self, points, K, T):
P = torch.matmul(K, T)[:, :3, :]
cam_points = torch.matmul(P, points)
pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(
1) + self.eps)
pix_coords = pix_coords.view(self.batch_size, 2, self.height, self.
width)
pix_coords = pix_coords.permute(0, 2, 3, 1)
pix_coords[..., 0] /= self.width - 1
pix_coords[..., 1] /= self.height - 1
pix_coords = (pix_coords - 0.5) * 2
return pix_coords
def get_inputs():
return [torch.rand([4, 3, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'batch_size': 4, 'height': 4, 'width': 4}]
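# Single-point walkthrough (an illustrative sketch; `pinhole_project` is not
# part of the captured repo code): the module applies the pinhole projection
# P = (K @ T)[:3] to homogeneous points, then divides by depth, before the
# final rescale into [-1, 1] for grid_sample-style sampling.
def pinhole_project(point_xyz1, K, T, eps=1e-07):
    P = torch.matmul(K, T)[:3, :]      # 3x4 projection matrix
    cam = torch.matmul(P, point_xyz1)  # (x, y, z) in the target camera
    return cam[:2] / (cam[2] + eps)    # perspective division -> pixel coords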
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 48
x1 = xindex // 48
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex % 32
x4 = xindex
tmp7 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr0 + (x3 + 48 * x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tmp4 == tmp4
tmp9 = 1e-07
tmp10 = tmp8 + tmp9
tmp11 = tmp7 / tmp10
tmp12 = 0.3333333333333333
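    # 0.3333... = 1 / (width - 1) = 1 / (height - 1) for this 4x4 output;
    # the where-chain below replays the module's in-place `/=` updates on
    # pixel channels 0 and 1 before the final (x - 0.5) * 2 rescale.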
tmp13 = tmp11 * tmp12
tmp14 = tl.where(tmp6, tmp13, tmp11)
tmp16 = tmp15 / tmp10
tmp17 = tl.where(tmp5, tmp13, tmp16)
tmp18 = tl.where(tmp5, tmp14, tmp17)
tmp19 = tmp18 * tmp12
tmp20 = tl.where(tmp3, tmp19, tmp18)
tmp21 = tmp0 == tmp4
tmp23 = tmp22 / tmp10
tmp24 = tl.where(tmp21, tmp13, tmp23)
tmp25 = tl.where(tmp21, tmp14, tmp24)
tmp26 = tl.where(tmp2, tmp19, tmp25)
tmp27 = tl.where(tmp2, tmp20, tmp26)
tmp28 = 0.5
tmp29 = tmp27 - tmp28
tmp30 = 2.0
tmp31 = tmp29 * tmp30
tl.store(out_ptr0 + x4, tmp31, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(192)](buf0, buf1, 192, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((12, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (12, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (12, 4, 4), (16, 4, 1), 0), out=buf2
)
del arg2_1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 4, 1, 16), torch.float32)
triton_poi_fused_mul_sub_1[grid(128)](buf2, buf3, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
return buf3,
class Project3DNew(nn.Module):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
def __init__(self, batch_size, height, width, eps=1e-07):
super(Project3DNew, self).__init__()
self.batch_size = batch_size
self.height = height
self.width = width
self.eps = eps
def forward(self, input_0, input_1, input_2):
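        # Argument mapping note: input_0 is the points tensor; the fused
        # call computes input_2 @ input_1 where the eager module computed
        # matmul(K, T), so input_2 plays K and input_1 plays T.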
arg2_1 = input_0
arg0_1 = input_1
arg1_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| Sid1057/sid1057.github.io | Project3D | false | 17,935 | [
"MIT"
] | 4 | 623d1731e308b42b6f86304dcfd671a061b414bf | https://github.com/Sid1057/sid1057.github.io/tree/623d1731e308b42b6f86304dcfd671a061b414bf |
ConvBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/tt/ctthexrcgmoykvsyasq7xirwxi6m3yxgjocmuvarikaawgqvdiws.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# out => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ty/ctyzpua7fjfn7hztwdpigb3gi7yf6hmfsexovkmknohzjtrj2z3h.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.elu]
# Source node to ATen node mapping:
# out_1 => convolution
# out_2 => expm1, gt, mul, mul_2, where
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_convolution_elu_1 = async_compile.triton('triton_poi_fused_convolution_elu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.elu]
triton_poi_fused_convolution_elu_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Conv3x3(nn.Module):
"""Layer to pad and convolve input
"""
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv3x3, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(1)
else:
self.pad = nn.ZeroPad2d(1)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)
def forward(self, x):
out = self.pad(x)
out = self.conv(out)
return out
class ConvBlock(nn.Module):
"""Layer to perform a convolution followed by ELU
"""
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv = Conv3x3(in_channels, out_channels)
self.nonlin = nn.ELU(inplace=True)
def forward(self, x):
out = self.conv(x)
out = self.nonlin(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
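# Usage sketch (not part of the captured repo code): spatial size is
# preserved because ReflectionPad2d(1) exactly offsets the shrinkage of the
# 3x3 convolution; ELU then maps x -> x for x > 0 and x -> exp(x) - 1
# otherwise (alpha = 1).
def _demo_conv_block():
    block = ConvBlock(in_channels=4, out_channels=4)
    x = torch.rand(4, 4, 4, 4)
    y = block(x)
    assert y.shape == x.shape
    return y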
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
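    # Reflection padding via index mirroring: the nested tl_math.abs terms
    # fold the padded 6x6 coordinates back into [0, 3], so each output
    # element reads the mirrored pixel of the 4x4 source.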
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + x3, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_elu_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0, buf2
class Conv3x3(nn.Module):
"""Layer to pad and convolve input
"""
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv3x3, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(1)
else:
self.pad = nn.ZeroPad2d(1)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)
def forward(self, x):
out = self.pad(x)
out = self.conv(out)
return out
class ConvBlockNew(nn.Module):
"""Layer to perform a convolution followed by ELU
"""
def __init__(self, in_channels, out_channels):
super(ConvBlockNew, self).__init__()
self.conv = Conv3x3(in_channels, out_channels)
self.nonlin = nn.ELU(inplace=True)
def forward(self, input_0):
primals_2 = self.conv.conv.weight
primals_3 = self.conv.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Sid1057/sid1057.github.io | ConvBlock | false | 17,936 | [
"MIT"
] | 4 | 623d1731e308b42b6f86304dcfd671a061b414bf | https://github.com/Sid1057/sid1057.github.io/tree/623d1731e308b42b6f86304dcfd671a061b414bf |
VirtualBatchNorm1d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/dt/cdt5xe76ihib7kbkeuxcku2laf3x3omyb56bnktw3634fuky6upy.py
# Topologically Sorted Source Nodes: [mean, mean_2, pow_1, mean_3, mean_sq], Original ATen: [aten.mean, aten.pow]
# Source node to ATen node mapping:
# mean => mean
# mean_2 => mean_1
# mean_3 => mean_2
# mean_sq => mean_3
# pow_1 => pow_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [2], True), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [0], True), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [2], True), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_2, [0], True), kwargs = {})
triton_poi_fused_mean_pow_0 = async_compile.triton('triton_poi_fused_mean_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_pow_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (16 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (17 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (18 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (19 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (32 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (33 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (34 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (35 + (4*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (48 + (4*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (49 + (4*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (50 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (51 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tmp37 = tmp0 * tmp0
tmp38 = tmp1 * tmp1
tmp39 = tmp37 + tmp38
tmp40 = tmp3 * tmp3
tmp41 = tmp39 + tmp40
tmp42 = tmp5 * tmp5
tmp43 = tmp41 + tmp42
tmp44 = tmp43 / tmp7
tmp45 = tmp9 * tmp9
tmp46 = tmp10 * tmp10
tmp47 = tmp45 + tmp46
tmp48 = tmp12 * tmp12
tmp49 = tmp47 + tmp48
tmp50 = tmp14 * tmp14
tmp51 = tmp49 + tmp50
tmp52 = tmp51 / tmp7
tmp53 = tmp44 + tmp52
tmp54 = tmp18 * tmp18
tmp55 = tmp19 * tmp19
tmp56 = tmp54 + tmp55
tmp57 = tmp21 * tmp21
tmp58 = tmp56 + tmp57
tmp59 = tmp23 * tmp23
tmp60 = tmp58 + tmp59
tmp61 = tmp60 / tmp7
tmp62 = tmp53 + tmp61
tmp63 = tmp27 * tmp27
tmp64 = tmp28 * tmp28
tmp65 = tmp63 + tmp64
tmp66 = tmp30 * tmp30
tmp67 = tmp65 + tmp66
tmp68 = tmp32 * tmp32
tmp69 = tmp67 + tmp68
tmp70 = tmp69 / tmp7
tmp71 = tmp62 + tmp70
tmp72 = tmp71 / tmp7
tl.store(out_ptr0 + (x0), tmp36, xmask)
tl.store(out_ptr1 + (x0), tmp72, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/lw/clw6yc2b22u363b3kkvc7zqldmbvoq4hxinkzr5iltmjj55lgg3r.py
# Topologically Sorted Source Nodes: [mul, mul_1, mean_5], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# mean_5 => add
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.8), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
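# Note: the 0.2 and 0.8 constants below are new_coeff = 1 / (batch_size + 1) and
# old_coeff = 1 - new_coeff from VirtualBatchNorm1d.forward, specialized here for batch_size = 4.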
triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp1 = 0.2
tmp2 = tmp0 * tmp1
tmp4 = 0.8
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/o2/co2tnhofzjl22uhffay4dviyzltwer2zslquy2uh72nwqsd4mmlu.py
# Topologically Sorted Source Nodes: [add_2, pow_2, sub, std, x, x_1, x_2, x_3], Original ATen: [aten.add, aten.pow, aten.sub, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_2 => add_2
# pow_2 => pow_2
# std => sqrt
# sub => sub
# x => sub_1
# x_1 => div
# x_2 => mul_4
# x_3 => add_3
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1e-05), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %pow_2), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sub,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %add), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_4), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_5), kwargs = {})
triton_poi_fused_add_div_mul_pow_sqrt_sub_2 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 16
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1e-05
tmp5 = tmp3 + tmp4
tmp6 = tmp1 * tmp1
tmp7 = tmp5 - tmp6
tmp8 = libdevice.sqrt(tmp7)
tmp9 = tmp2 / tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (1, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
buf1 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [mean, mean_2, pow_1, mean_3, mean_sq], Original ATen: [aten.mean, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_pow_0.run(primals_1, buf0, buf1, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1, mean_5], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_1.run(buf0, primals_2, buf2, 16, grid=grid(16), stream=stream0)
del buf0
del primals_2
buf3 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, mul_3, mean_sq_1], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_1.run(buf1, primals_3, buf3, 16, grid=grid(16), stream=stream0)
del buf1
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_2, pow_2, sub, std, x, x_1, x_2, x_3], Original ATen: [aten.add, aten.pow, aten.sub, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mul_pow_sqrt_sub_2.run(primals_1, buf2, buf3, primals_4, primals_5, buf4, 64, grid=grid(64), stream=stream0)
del primals_4
del primals_5
return (buf4, buf2, buf3, primals_1, buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import torch.utils
import torch.utils.data
from torch.nn.parameter import Parameter
from torch.nn.modules import Module
class VirtualBatchNorm1d(Module):
"""
Module for Virtual Batch Normalization.
    Implementation borrowed and modified from Rafael_Valle's code, with help from SimonW in this discussion thread:
https://discuss.pytorch.org/t/parameter-grad-of-conv-weight-is-none-after-virtual-batch-normalization/9036
"""
def __init__(self, num_features: 'int', eps: 'float'=1e-05):
super().__init__()
self.num_features = num_features
self.eps = eps
self.ref_mean = self.register_parameter('ref_mean', None)
self.ref_mean_sq = self.register_parameter('ref_mean_sq', None)
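        # Note: register_parameter returns None, so ref_mean/ref_mean_sq start out as None attributes.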
gamma = torch.normal(mean=torch.ones(1, num_features, 1), std=0.02)
self.gamma = Parameter(gamma.float())
self.beta = Parameter(torch.FloatTensor(1, num_features, 1).fill_(0))
def get_stats(self, x):
"""
Calculates mean and mean square for given batch x.
Args:
x: tensor containing batch of activations
Returns:
            mean: per-feature mean over the time and batch dimensions, shape (1, num_features, 1)
            mean_sq: per-feature mean of the squared activations, same shape
"""
mean = x.mean(2, keepdim=True).mean(0, keepdim=True)
mean_sq = (x ** 2).mean(2, keepdim=True).mean(0, keepdim=True)
return mean, mean_sq
def forward(self, x, ref_mean: 'None', ref_mean_sq: 'None'):
"""
        Forward pass of virtual batch normalization.
        Virtual batch normalization requires two forward passes:
        one over the reference batch and one over the train batch.
        Passing ref_mean and ref_mean_sq as None marks the forward pass
        for the reference batch; the returned statistics can then be cached.
        Args:
            x: input tensor
            ref_mean: cached reference mean, or None on the reference pass
            ref_mean_sq: cached reference squared mean, or None on the reference pass
        Returns:
            out: normalized batch tensor
            mean, mean_sq: the statistics used, blended with the reference ones on train passes
"""
mean, mean_sq = self.get_stats(x)
if ref_mean is None or ref_mean_sq is None:
mean = mean.clone().detach()
mean_sq = mean_sq.clone().detach()
out = self._normalize(x, mean, mean_sq)
else:
batch_size = x.size(0)
new_coeff = 1.0 / (batch_size + 1.0)
old_coeff = 1.0 - new_coeff
mean = new_coeff * mean + old_coeff * ref_mean
mean_sq = new_coeff * mean_sq + old_coeff * ref_mean_sq
out = self._normalize(x, mean, mean_sq)
return out, mean, mean_sq
def _normalize(self, x, mean, mean_sq):
"""
Normalize tensor x given the statistics.
Args:
x: input tensor
            mean: mean over features; it has size [1, num_features, 1]
            mean_sq: squared means over features.
        Returns:
            x: normalized batch tensor
"""
assert mean_sq is not None
assert mean is not None
assert len(x.size()) == 3
if mean.size(1) != self.num_features:
raise Exception(
                'Mean size not equal to number of features: given {}, expected {}'
.format(mean.size(1), self.num_features))
if mean_sq.size(1) != self.num_features:
raise Exception(
'Squared mean tensor size not equal to number of features : given {}, expected {}'
.format(mean_sq.size(1), self.num_features))
std = torch.sqrt(self.eps + mean_sq - mean ** 2)
x = x - mean
x = x / std
x = x * self.gamma
x = x + self.beta
return x
def __repr__(self):
        return '{name}(num_features={num_features}, eps={eps})'.format(name=
self.__class__.__name__, **self.__dict__)
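# Illustrative usage sketch (added for clarity; not part of the original repo).
# It demonstrates the two-pass protocol described in forward(); the batch shape
# below is an assumption matching get_inputs().
def _vbn_usage_example():
    vbn = VirtualBatchNorm1d(num_features=4)
    ref_batch = torch.rand(4, 4, 4)  # (batch, features, time)
    # Pass 1: with ref_mean/ref_mean_sq as None, the reference batch's
    # statistics are computed, detached, and returned for caching.
    _, ref_mean, ref_mean_sq = vbn(ref_batch, None, None)
    # Pass 2: the cached reference statistics are blended with the current
    # batch's statistics (new_coeff = 1 / (batch_size + 1)).
    train_batch = torch.rand(4, 4, 4)
    out, _, _ = vbn(train_batch, ref_mean, ref_mean_sq)
    return out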
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import torch.utils
import torch.utils.data
from torch.nn.parameter import Parameter
from torch.nn.modules import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_pow_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (16 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (17 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (18 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (19 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (32 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (33 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (34 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (35 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (48 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (49 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (50 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr0 + (51 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tmp37 = tmp0 * tmp0
tmp38 = tmp1 * tmp1
tmp39 = tmp37 + tmp38
tmp40 = tmp3 * tmp3
tmp41 = tmp39 + tmp40
tmp42 = tmp5 * tmp5
tmp43 = tmp41 + tmp42
tmp44 = tmp43 / tmp7
tmp45 = tmp9 * tmp9
tmp46 = tmp10 * tmp10
tmp47 = tmp45 + tmp46
tmp48 = tmp12 * tmp12
tmp49 = tmp47 + tmp48
tmp50 = tmp14 * tmp14
tmp51 = tmp49 + tmp50
tmp52 = tmp51 / tmp7
tmp53 = tmp44 + tmp52
tmp54 = tmp18 * tmp18
tmp55 = tmp19 * tmp19
tmp56 = tmp54 + tmp55
tmp57 = tmp21 * tmp21
tmp58 = tmp56 + tmp57
tmp59 = tmp23 * tmp23
tmp60 = tmp58 + tmp59
tmp61 = tmp60 / tmp7
tmp62 = tmp53 + tmp61
tmp63 = tmp27 * tmp27
tmp64 = tmp28 * tmp28
tmp65 = tmp63 + tmp64
tmp66 = tmp30 * tmp30
tmp67 = tmp65 + tmp66
tmp68 = tmp32 * tmp32
tmp69 = tmp67 + tmp68
tmp70 = tmp69 / tmp7
tmp71 = tmp62 + tmp70
tmp72 = tmp71 / tmp7
tl.store(out_ptr0 + x0, tmp36, xmask)
tl.store(out_ptr1 + x0, tmp72, xmask)
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp1 = 0.2
tmp2 = tmp0 * tmp1
tmp4 = 0.8
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sub_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 16
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
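    # std = sqrt(mean_sq + eps - mean**2); out = (x - mean) / std * gamma + beta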
tmp2 = tmp0 - tmp1
tmp4 = 1e-05
tmp5 = tmp3 + tmp4
tmp6 = tmp1 * tmp1
tmp7 = tmp5 - tmp6
tmp8 = libdevice.sqrt(tmp7)
tmp9 = tmp2 / tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (1, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
buf1 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
get_raw_stream(0)
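        # buf0/buf1: per-feature mean and mean-of-squares over the batch and time dims (get_stats).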
triton_poi_fused_mean_pow_0[grid(4)](primals_1, buf0, buf1, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
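        # buf2 = 0.2 * batch_mean + 0.8 * ref_mean (blended mean).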
triton_poi_fused_add_mul_1[grid(16)](buf0, primals_2, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf0
del primals_2
buf3 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
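        # buf3 = 0.2 * batch_mean_sq + 0.8 * ref_mean_sq (blended squared mean).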
triton_poi_fused_add_mul_1[grid(16)](buf1, primals_3, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
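        # buf4 = gamma * (x - mean) / sqrt(mean_sq + eps - mean**2) + beta (_normalize).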
triton_poi_fused_add_div_mul_pow_sqrt_sub_2[grid(64)](primals_1,
buf2, buf3, primals_4, primals_5, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_4
del primals_5
return buf4, buf2, buf3, primals_1, buf2, buf3
class VirtualBatchNorm1dNew(Module):
"""
Module for Virtual Batch Normalization.
    Implementation borrowed and modified from Rafael_Valle's code, with help from SimonW in this discussion thread:
https://discuss.pytorch.org/t/parameter-grad-of-conv-weight-is-none-after-virtual-batch-normalization/9036
"""
def __init__(self, num_features: 'int', eps: 'float'=1e-05):
super().__init__()
self.num_features = num_features
self.eps = eps
self.ref_mean = self.register_parameter('ref_mean', None)
self.ref_mean_sq = self.register_parameter('ref_mean_sq', None)
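        # Note: register_parameter returns None, so ref_mean/ref_mean_sq start out as None attributes.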
gamma = torch.normal(mean=torch.ones(1, num_features, 1), std=0.02)
self.gamma = Parameter(gamma.float())
self.beta = Parameter(torch.FloatTensor(1, num_features, 1).fill_(0))
def get_stats(self, x):
"""
Calculates mean and mean square for given batch x.
Args:
x: tensor containing batch of activations
Returns:
            mean: per-feature mean over the time and batch dimensions, shape (1, num_features, 1)
            mean_sq: per-feature mean of the squared activations, same shape
"""
mean = x.mean(2, keepdim=True).mean(0, keepdim=True)
mean_sq = (x ** 2).mean(2, keepdim=True).mean(0, keepdim=True)
return mean, mean_sq
def _normalize(self, x, mean, mean_sq):
"""
Normalize tensor x given the statistics.
Args:
x: input tensor
            mean: mean over features; it has size [1, num_features, 1]
            mean_sq: squared means over features.
        Returns:
            x: normalized batch tensor
"""
assert mean_sq is not None
assert mean is not None
assert len(x.size()) == 3
if mean.size(1) != self.num_features:
raise Exception(
                'Mean size not equal to number of features: given {}, expected {}'
.format(mean.size(1), self.num_features))
if mean_sq.size(1) != self.num_features:
raise Exception(
'Squared mean tensor size not equal to number of features : given {}, expected {}'
.format(mean_sq.size(1), self.num_features))
std = torch.sqrt(self.eps + mean_sq - mean ** 2)
x = x - mean
x = x / std
x = x * self.gamma
x = x + self.beta
return x
def __repr__(self):
        return '{name}(num_features={num_features}, eps={eps})'.format(name=
self.__class__.__name__, **self.__dict__)
def forward(self, input_0, input_1, input_2):
primals_4 = self.gamma
primals_5 = self.beta
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2]
| Silent-Zebra/JEM | VirtualBatchNorm1d | false | 17,937 | ["Apache-2.0"] | 6 | 33440aff8429d9a24a8ba858d0209f4b48be8e05 | https://github.com/Silent-Zebra/JEM/tree/33440aff8429d9a24a8ba858d0209f4b48be8e05 |
WeightNet_DW | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/nz/cnzmktfxcord7pcoprpzois2x6jhnz2wdw42eajsvyzqrviodi6d.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = (yindex // 16)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (16*x2) + (65536*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/5k/c5kkhqlod2mtlbh4u6vikb2okcixlpjn5qavkcdbzwem6zkex3wm.py
# Topologically Sorted Source Nodes: [x_w, x_w_1], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# x_w => convolution
# x_w_1 => sigmoid
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/li/clixw65viq5p3o6weatexxst5abcf2ubpnqut5dffri2mt2lmj7h.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.view]
# Source node to ATen node mapping:
# x => view
# Graph fragment:
# %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%primals_5, [1, -1, 4, 4]), kwargs = {})
triton_poi_fused_view_2 = async_compile.triton('triton_poi_fused_view_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + (16*y0)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (16*x1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/65/c65xpx3jdhlsxxilbp5lqbsdyclqcypcqq5dow6oj7ivfs4zzm34.py
# Topologically Sorted Source Nodes: [x_w_3], Original ATen: [aten.view]
# Source node to ATen node mapping:
# x_w_3 => view_1
# Graph fragment:
# %view_1 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%convolution_1, [-1, 1, 4, 4]), kwargs = {})
triton_poi_fused_view_3 = async_compile.triton('triton_poi_fused_view_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((64*(x0 % 4096)) + (262144*(x0 // 262144)) + ((x0 // 4096) % 64)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/7e/c7ejy3idmtal7fjpxckcr3qhwfkrlsre2sugbrluplqs4w3af364.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.view]
# Source node to ATen node mapping:
# x_2 => view_2
# Graph fragment:
# %view_2 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%convolution_2, [-1, 4, 5, 5]), kwargs = {})
triton_poi_fused_view_4 = async_compile.triton('triton_poi_fused_view_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (65536*x1)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (25*y0)), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1))
assert_size_stride(primals_4, (64, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 64, 64), (65536, 1, 1024, 16), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 64, 4096, grid=grid(64, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_w], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 64, 64), (16384, 1, 256, 4))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_w, x_w_1], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_1.run(buf2, primals_2, 65536, grid=grid(65536), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_w_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf3, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf4 = empty_strided_cuda((1, 16, 4, 4), (256, 1, 64, 16), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.view]
triton_poi_fused_view_2.run(primals_5, buf4, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((65536, 1, 4, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_w_3], Original ATen: [aten.view]
triton_poi_fused_view_3.run(buf3, buf5, 1048576, grid=grid(1048576), stream=stream0)
del buf3
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, buf5, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=16, bias=None)
assert_size_stride(buf6, (1, 65536, 5, 5), (1638400, 1, 327680, 65536))
buf7 = empty_strided_cuda((16384, 4, 5, 5), (100, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.view]
triton_poi_fused_view_4.run(buf6, buf7, 65536, 25, grid=grid(65536, 25), stream=stream0)
del buf6
return (buf7, primals_1, buf0, primals_4, buf2, buf4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 16, 64, 64), (65536, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class WeightNet_DW(nn.Module):
""" Here we show a grouping manner when we apply
WeightNet to a depthwise convolution. The grouped
fc layer directly generates the convolutional kernel,
has fewer parameters while achieving comparable results.
This layer has M/G*inp inputs, inp groups and inp*ksize*ksize outputs.
Args:
inp (int): Number of input channels
oup (int): Number of output channels
ksize (int): Size of the convolving kernel
stride (int): Stride of the convolution
"""
def __init__(self, inp, ksize, stride):
super().__init__()
self.M = 2
self.G = 2
self.pad = ksize // 2
inp_gap = max(16, inp // 16)
self.inp = inp
self.ksize = ksize
self.stride = stride
self.wn_fc1 = nn.Conv2d(inp_gap, self.M // self.G * inp, 1, 1, 0,
groups=1, bias=True)
self.sigmoid = nn.Sigmoid()
self.wn_fc2 = nn.Conv2d(self.M // self.G * inp, inp * ksize * ksize,
1, 1, 0, groups=inp, bias=False)
def forward(self, x, x_gap):
""" Input:
            x (bs*c*h*w): the output feature from the previous convolution layer
            x_gap (bs*inp_gap*1*1): the output feature from the reduction layer
"""
x_w = self.wn_fc1(x_gap)
x_w = self.sigmoid(x_w)
x_w = self.wn_fc2(x_w)
batch_size = x.shape[0]
x = x.reshape(1, -1, x.shape[2], x.shape[3])
x_w = x_w.reshape(-1, 1, self.ksize, self.ksize)
x = F.conv2d(x, weight=x_w, stride=self.stride, padding=self.pad,
groups=batch_size * self.inp)
x = x.reshape(-1, self.inp, x.shape[2], x.shape[3])
return x
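# Illustrative usage sketch (added for clarity; not part of the original repo).
# Shapes and ksize are assumptions; an odd ksize makes the padding of
# ksize // 2 preserve the spatial size.
def _weightnet_dw_usage_example():
    m = WeightNet_DW(inp=4, ksize=3, stride=1)
    x = torch.rand(2, 4, 8, 8)       # (bs, inp, h, w): previous feature map
    x_gap = torch.rand(2, 16, 1, 1)  # (bs, inp_gap, 1, 1); inp_gap = max(16, inp // 16)
    # The grouped fc layers turn x_gap into one depthwise kernel per sample,
    # applied via a single grouped conv over the flattened batch.
    out = m(x, x_gap)
    assert out.shape == (2, 4, 8, 8)
    return out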
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 16, 64, 64])]
def get_init_inputs():
return [[], {'inp': 4, 'ksize': 4, 'stride': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 65536 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
@triton.jit
def triton_poi_fused_view_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask)
tl.store(out_ptr0 + (y0 + 16 * x1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (64 * (x0 % 4096) + 262144 * (x0 // 262144) +
x0 // 4096 % 64), None, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, None)
@triton.jit
def triton_poi_fused_view_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 65536 * x1), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 25 * y0), tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1))
assert_size_stride(primals_4, (64, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 64, 64), (65536, 1, 1024, 16),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(64, 4096)](primals_3, buf0, 64, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 64, 64), (16384, 1, 256, 4))
buf2 = buf1
del buf1
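        # Fused bias-add + sigmoid applied in place: buf2 = sigmoid(wn_fc1(x_gap)).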
triton_poi_fused_convolution_sigmoid_1[grid(65536)](buf2, primals_2,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf3, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf4 = empty_strided_cuda((1, 16, 4, 4), (256, 1, 64, 16), torch.
float32)
triton_poi_fused_view_2[grid(16, 16)](primals_5, buf4, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((65536, 1, 4, 4), (16, 4, 4, 1), torch.
float32)
triton_poi_fused_view_3[grid(1048576)](buf3, buf5, 1048576, XBLOCK=
1024, num_warps=4, num_stages=1)
del buf3
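        # buf5: per-sample depthwise kernels from wn_fc2, reshaped to (-1, 1, ksize, ksize).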
buf6 = extern_kernels.convolution(buf4, buf5, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=16, bias=None)
assert_size_stride(buf6, (1, 65536, 5, 5), (1638400, 1, 327680, 65536))
buf7 = empty_strided_cuda((16384, 4, 5, 5), (100, 25, 5, 1), torch.
float32)
triton_poi_fused_view_4[grid(65536, 25)](buf6, buf7, 65536, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf6
return buf7, primals_1, buf0, primals_4, buf2, buf4, buf5
class WeightNet_DWNew(nn.Module):
""" Here we show a grouping manner when we apply
WeightNet to a depthwise convolution. The grouped
fc layer directly generates the convolutional kernel,
has fewer parameters while achieving comparable results.
This layer has M/G*inp inputs, inp groups and inp*ksize*ksize outputs.
Args:
inp (int): Number of input channels
oup (int): Number of output channels
ksize (int): Size of the convolving kernel
stride (int): Stride of the convolution
"""
def __init__(self, inp, ksize, stride):
super().__init__()
self.M = 2
self.G = 2
self.pad = ksize // 2
inp_gap = max(16, inp // 16)
self.inp = inp
self.ksize = ksize
self.stride = stride
self.wn_fc1 = nn.Conv2d(inp_gap, self.M // self.G * inp, 1, 1, 0,
groups=1, bias=True)
self.sigmoid = nn.Sigmoid()
self.wn_fc2 = nn.Conv2d(self.M // self.G * inp, inp * ksize * ksize,
1, 1, 0, groups=inp, bias=False)
def forward(self, input_0, input_1):
primals_1 = self.wn_fc1.weight
primals_2 = self.wn_fc1.bias
primals_4 = self.wn_fc2.weight
primals_5 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Sense-GVT/BigPretrain | WeightNet_DW | false | 17,938 | ["Apache-2.0"] | 8 | d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e | https://github.com/Sense-GVT/BigPretrain/tree/d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e |
SSIM | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ej/cej2a6xxnolmzoep5ojsfqcahzezsuoiyyzy53hjcqadwccridxo.py
# Topologically Sorted Source Nodes: [x, y, mul], Original ATen: [aten.reflection_pad2d, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# x => _unsafe_index, _unsafe_index_1
# y => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_3]), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg1_1, [None, None, %sub_5, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_7]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_1, %_unsafe_index_3), kwargs = {})
triton_poi_fused_mul_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_mul_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
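# A minimal eager-mode reference for the loss these fused kernels compute
# (a sketch reconstructed from the graph fragments above; added for readability
# and not part of the generated module). C1 and C2 match the constants 0.0001
# and 0.0009 that appear in the kernels.
def ssim_loss_reference(x, y, C1=0.0001, C2=0.0009):
    import torch.nn.functional as F
    def pad(t):
        # reflection pad by 1 on each side, matching the _unsafe_index ops
        return F.pad(t, (1, 1, 1, 1), mode='reflect')
    mu_x = F.avg_pool2d(pad(x), 3, 1)
    mu_y = F.avg_pool2d(pad(y), 3, 1)
    sigma_x = F.avg_pool2d(pad(x) ** 2, 3, 1) - mu_x ** 2
    sigma_y = F.avg_pool2d(pad(y) ** 2, 3, 1) - mu_y ** 2
    sigma_xy = F.avg_pool2d(pad(x) * pad(y), 3, 1) - mu_x * mu_y
    ssim_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)
    ssim_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)
    return torch.clamp((1 - ssim_n / ssim_d) / 2, 0, 1)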
# kernel path: runs/run_shard_2/inductor_cache/hn/chnkzxhzqq76ck2yjcccwwqis5ftbw3hrr6yefhkstf3wqajozke.py
# Topologically Sorted Source Nodes: [x, mu_x, mul_2, y, mu_y, mul_3, add, mul, avg_pool2d_4, mul_1, sigma_xy, mul_4, add_1, SSIM_n, pow_5, pow_6, add_2, add_3, pow_1, avg_pool2d_2, pow_2, sigma_x, pow_3, avg_pool2d_3, pow_4, sigma_y, add_4, add_5, SSIM_d, truediv, sub_3, truediv_1, clamp], Original ATen: [aten.reflection_pad2d, aten.avg_pool2d, aten.mul, aten.add, aten.sub, aten.pow, aten.div, aten.rsub, aten.clamp]
# Source node to ATen node mapping:
# SSIM_d => mul_6
# SSIM_n => mul_5
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# add_4 => add_4
# add_5 => add_5
# avg_pool2d_2 => avg_pool2d_2
# avg_pool2d_3 => avg_pool2d_3
# avg_pool2d_4 => avg_pool2d_4
# clamp => clamp_max, clamp_min
# mu_x => avg_pool2d
# mu_y => avg_pool2d_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# pow_5 => pow_5
# pow_6 => pow_6
# sigma_x => sub_8
# sigma_xy => sub_10
# sigma_y => sub_9
# sub_3 => sub_11
# truediv => div
# truediv_1 => div_1
# x => _unsafe_index, _unsafe_index_1
# y => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_3]), kwargs = {})
# %avg_pool2d : [num_users=4] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%_unsafe_index_1, [3, 3], [1, 1]), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, 2), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg1_1, [None, None, %sub_5, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_7]), kwargs = {})
# %avg_pool2d_1 : [num_users=4] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%_unsafe_index_3, [3, 3], [1, 1]), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %avg_pool2d_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 0.0001), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_1, %_unsafe_index_3), kwargs = {})
# %avg_pool2d_4 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%mul, [3, 3], [1, 1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, %avg_pool2d_1), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%avg_pool2d_4, %mul_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0.0009), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add_1), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%avg_pool2d, 2), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%avg_pool2d_1, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_5, %pow_6), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 0.0001), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%_unsafe_index_1, 2), kwargs = {})
# %avg_pool2d_2 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_1, [3, 3], [1, 1]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%avg_pool2d, 2), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%avg_pool2d_2, %pow_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%_unsafe_index_3, 2), kwargs = {})
# %avg_pool2d_3 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_3, [3, 3], [1, 1]), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%avg_pool2d_1, 2), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%avg_pool2d_3, %pow_4), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_8, %sub_9), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, 0.0009), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, %add_5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_5, %mul_6), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_11, 2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div_1, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {})
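# Reference (annotation added for this write-up): with C1 = 0.01 ** 2 = 0.0001
# and C2 = 0.03 ** 2 = 0.0009, the graph above fuses
#   SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)
#   SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)
#   out    = clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)
# matching the eager-mode SSIM.forward reproduced later in this entry.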
triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1 = async_compile.triton('triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 27, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (6*x1) + (36*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + (6*x1) + (36*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + (6*x1) + (36*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (6 + x0 + (6*x1) + (36*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (7 + x0 + (6*x1) + (36*x2)), xmask)
tmp9 = tl.load(in_ptr0 + (8 + x0 + (6*x1) + (36*x2)), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + (6*x1) + (36*x2)), xmask)
tmp13 = tl.load(in_ptr0 + (13 + x0 + (6*x1) + (36*x2)), xmask)
tmp15 = tl.load(in_ptr0 + (14 + x0 + (6*x1) + (36*x2)), xmask)
tmp19 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask)
tmp22 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask)
tmp24 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask)
tmp28 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask)
tmp30 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask)
tmp34 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask)
tmp55 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask)
tmp58 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask)
tmp60 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp62 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask)
tmp64 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask)
tmp66 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp68 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask)
tmp70 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tmp21 = tmp20 + tmp19
tmp23 = tmp22 + tmp21
tmp25 = tmp24 + tmp23
tmp27 = tmp26 + tmp25
tmp29 = tmp28 + tmp27
tmp31 = tmp30 + tmp29
tmp33 = tmp32 + tmp31
tmp35 = tmp34 + tmp33
tmp36 = tmp35 * tmp17
tmp37 = tmp19 * tmp19
tmp38 = tmp20 * tmp20
tmp39 = tmp38 + tmp37
tmp40 = tmp22 * tmp22
tmp41 = tmp40 + tmp39
tmp42 = tmp24 * tmp24
tmp43 = tmp42 + tmp41
tmp44 = tmp26 * tmp26
tmp45 = tmp44 + tmp43
tmp46 = tmp28 * tmp28
tmp47 = tmp46 + tmp45
tmp48 = tmp30 * tmp30
tmp49 = tmp48 + tmp47
tmp50 = tmp32 * tmp32
tmp51 = tmp50 + tmp49
tmp52 = tmp34 * tmp34
tmp53 = tmp52 + tmp51
tmp54 = tmp53 * tmp17
tmp57 = tmp56 + tmp55
tmp59 = tmp58 + tmp57
tmp61 = tmp60 + tmp59
tmp63 = tmp62 + tmp61
tmp65 = tmp64 + tmp63
tmp67 = tmp66 + tmp65
tmp69 = tmp68 + tmp67
tmp71 = tmp70 + tmp69
tmp72 = tmp71 * tmp17
tmp73 = tmp55 * tmp55
tmp74 = tmp56 * tmp56
tmp75 = tmp74 + tmp73
tmp76 = tmp58 * tmp58
tmp77 = tmp76 + tmp75
tmp78 = tmp60 * tmp60
tmp79 = tmp78 + tmp77
tmp80 = tmp62 * tmp62
tmp81 = tmp80 + tmp79
tmp82 = tmp64 * tmp64
tmp83 = tmp82 + tmp81
tmp84 = tmp66 * tmp66
tmp85 = tmp84 + tmp83
tmp86 = tmp68 * tmp68
tmp87 = tmp86 + tmp85
tmp88 = tmp70 * tmp70
tmp89 = tmp88 + tmp87
tmp90 = tmp89 * tmp17
tmp91 = 2.0
tmp92 = tmp36 * tmp91
tmp93 = tmp92 * tmp72
tmp94 = 0.0001
tmp95 = tmp93 + tmp94
tmp96 = tmp36 * tmp72
tmp97 = tmp18 - tmp96
tmp98 = tmp97 * tmp91
tmp99 = 0.0009
tmp100 = tmp98 + tmp99
tmp101 = tmp95 * tmp100
tmp102 = tmp36 * tmp36
tmp103 = tmp72 * tmp72
tmp104 = tmp102 + tmp103
tmp105 = tmp104 + tmp94
tmp106 = tmp54 - tmp102
tmp107 = tmp90 - tmp103
tmp108 = tmp106 + tmp107
tmp109 = tmp108 + tmp99
tmp110 = tmp105 * tmp109
tmp111 = tmp101 / tmp110
tmp112 = 1.0
tmp113 = tmp112 - tmp111
tmp114 = 0.5
tmp115 = tmp113 * tmp114
tmp116 = 0.0
tmp117 = triton_helpers.maximum(tmp115, tmp116)
tmp118 = triton_helpers.minimum(tmp117, tmp112)
tl.store(in_out_ptr0 + (x3), tmp118, xmask)
''', device_str='cuda')
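# Note (annotation added for this write-up): the 3x3 average pools are inlined
# as sums of nine loads scaled by 0.1111111111111111 (= 1/9); the pools of
# x ** 2 and y ** 2 reuse the same 27 loads, and the reflection padding is
# folded into the abs()-based index arithmetic, so the whole SSIM map is
# produced in a single pass.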
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, y, mul], Original ATen: [aten.reflection_pad2d, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_reflection_pad2d_0.run(arg0_1, arg1_1, buf2, 576, grid=grid(576), stream=stream0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = buf0; del buf0 # reuse
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x, mu_x, mul_2, y, mu_y, mul_3, add, mul, avg_pool2d_4, mul_1, sigma_xy, mul_4, add_1, SSIM_n, pow_5, pow_6, add_2, add_3, pow_1, avg_pool2d_2, pow_2, sigma_x, pow_3, avg_pool2d_3, pow_4, sigma_y, add_4, add_5, SSIM_d, truediv, sub_3, truediv_1, clamp], Original ATen: [aten.reflection_pad2d, aten.avg_pool2d, aten.mul, aten.add, aten.sub, aten.pow, aten.div, aten.rsub, aten.clamp]
triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1.run(buf7, buf2, arg0_1, arg1_1, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del buf2
return (buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SSIM(nn.Module):
"""Layer to compute the SSIM loss between a pair of images
"""
def __init__(self):
super(SSIM, self).__init__()
self.mu_x_pool = nn.AvgPool2d(3, 1)
self.mu_y_pool = nn.AvgPool2d(3, 1)
self.sig_x_pool = nn.AvgPool2d(3, 1)
self.sig_y_pool = nn.AvgPool2d(3, 1)
self.sig_xy_pool = nn.AvgPool2d(3, 1)
self.refl = nn.ReflectionPad2d(1)
self.C1 = 0.01 ** 2
self.C2 = 0.03 ** 2
def forward(self, x, y):
x = self.refl(x)
y = self.refl(y)
mu_x = self.mu_x_pool(x)
mu_y = self.mu_y_pool(y)
sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
        SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (
            sigma_x + sigma_y + self.C2)
return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
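# Usage sketch (illustrative; added for this write-up, not part of the
# original module file):
if __name__ == '__main__':
    ssim = SSIM()
    x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    loss_map = ssim(x, y)  # same shape as the inputs, values in [0, 1]
    print(loss_map.shape, float(loss_map.mean()))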
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
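# The kernel above materialises the reflection-padded elementwise product
# x * y into a (4, 4, 6, 6) buffer; the fused kernel below pools it with a
# 3x3 window to obtain E[xy], from which sigma_xy = E[xy] - mu_x * mu_y.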
@triton.jit
def triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask)
tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask)
tmp15 = tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask)
tmp19 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp22 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp24 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
        x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask,
        eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp28 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp30 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
        x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask,
        eviction_policy='evict_last')
tmp32 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp34 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp55 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp58 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp60 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
        x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask,
        eviction_policy='evict_last')
tmp62 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp64 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp66 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
        x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask,
        eviction_policy='evict_last')
tmp68 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp70 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tmp21 = tmp20 + tmp19
tmp23 = tmp22 + tmp21
tmp25 = tmp24 + tmp23
tmp27 = tmp26 + tmp25
tmp29 = tmp28 + tmp27
tmp31 = tmp30 + tmp29
tmp33 = tmp32 + tmp31
tmp35 = tmp34 + tmp33
tmp36 = tmp35 * tmp17
tmp37 = tmp19 * tmp19
tmp38 = tmp20 * tmp20
tmp39 = tmp38 + tmp37
tmp40 = tmp22 * tmp22
tmp41 = tmp40 + tmp39
tmp42 = tmp24 * tmp24
tmp43 = tmp42 + tmp41
tmp44 = tmp26 * tmp26
tmp45 = tmp44 + tmp43
tmp46 = tmp28 * tmp28
tmp47 = tmp46 + tmp45
tmp48 = tmp30 * tmp30
tmp49 = tmp48 + tmp47
tmp50 = tmp32 * tmp32
tmp51 = tmp50 + tmp49
tmp52 = tmp34 * tmp34
tmp53 = tmp52 + tmp51
tmp54 = tmp53 * tmp17
tmp57 = tmp56 + tmp55
tmp59 = tmp58 + tmp57
tmp61 = tmp60 + tmp59
tmp63 = tmp62 + tmp61
tmp65 = tmp64 + tmp63
tmp67 = tmp66 + tmp65
tmp69 = tmp68 + tmp67
tmp71 = tmp70 + tmp69
tmp72 = tmp71 * tmp17
tmp73 = tmp55 * tmp55
tmp74 = tmp56 * tmp56
tmp75 = tmp74 + tmp73
tmp76 = tmp58 * tmp58
tmp77 = tmp76 + tmp75
tmp78 = tmp60 * tmp60
tmp79 = tmp78 + tmp77
tmp80 = tmp62 * tmp62
tmp81 = tmp80 + tmp79
tmp82 = tmp64 * tmp64
tmp83 = tmp82 + tmp81
tmp84 = tmp66 * tmp66
tmp85 = tmp84 + tmp83
tmp86 = tmp68 * tmp68
tmp87 = tmp86 + tmp85
tmp88 = tmp70 * tmp70
tmp89 = tmp88 + tmp87
tmp90 = tmp89 * tmp17
tmp91 = 2.0
tmp92 = tmp36 * tmp91
tmp93 = tmp92 * tmp72
tmp94 = 0.0001
tmp95 = tmp93 + tmp94
tmp96 = tmp36 * tmp72
tmp97 = tmp18 - tmp96
tmp98 = tmp97 * tmp91
tmp99 = 0.0009
tmp100 = tmp98 + tmp99
tmp101 = tmp95 * tmp100
tmp102 = tmp36 * tmp36
tmp103 = tmp72 * tmp72
tmp104 = tmp102 + tmp103
tmp105 = tmp104 + tmp94
tmp106 = tmp54 - tmp102
tmp107 = tmp90 - tmp103
tmp108 = tmp106 + tmp107
tmp109 = tmp108 + tmp99
tmp110 = tmp105 * tmp109
tmp111 = tmp101 / tmp110
tmp112 = 1.0
tmp113 = tmp112 - tmp111
tmp114 = 0.5
tmp115 = tmp113 * tmp114
tmp116 = 0.0
tmp117 = triton_helpers.maximum(tmp115, tmp116)
tmp118 = triton_helpers.minimum(tmp117, tmp112)
tl.store(in_out_ptr0 + x3, tmp118, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_reflection_pad2d_0[grid(576)](arg0_1, arg1_1,
buf2, 576, XBLOCK=128, num_warps=4, num_stages=1)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = buf0
del buf0
buf7 = buf6
del buf6
triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1[
grid(256)](buf7, buf2, arg0_1, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del buf2
return buf7,
class SSIMNew(nn.Module):
"""Layer to compute the SSIM loss between a pair of images
"""
def __init__(self):
super(SSIMNew, self).__init__()
self.mu_x_pool = nn.AvgPool2d(3, 1)
self.mu_y_pool = nn.AvgPool2d(3, 1)
self.sig_x_pool = nn.AvgPool2d(3, 1)
self.sig_y_pool = nn.AvgPool2d(3, 1)
self.sig_xy_pool = nn.AvgPool2d(3, 1)
self.refl = nn.ReflectionPad2d(1)
self.C1 = 0.01 ** 2
self.C2 = 0.03 ** 2
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Sid1057/sid1057.github.io | SSIM | false | 17,939 | ["MIT"] | 4 | 623d1731e308b42b6f86304dcfd671a061b414bf | https://github.com/Sid1057/sid1057.github.io/tree/623d1731e308b42b6f86304dcfd671a061b414bf |
GraphConvolution | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/qy/cqyfcfmggrmtcahfwcn5wrkwarx42uue3wdjigqgmz46ojy52mak.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
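# Note (annotation added for this write-up): this kernel fuses the two bias
# additions and the final sum of the layer,
#   out = (x @ W1.T + b1) + ((adj @ x) @ W2.T + b2),
# while the matmul and bmm themselves are dispatched to extern_kernels in
# call() below.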
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_4, primals_3, out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf3, primals_2, buf2, primals_6, 64, grid=grid(64), stream=stream0)
del buf2
del primals_2
del primals_6
return (buf3, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (16, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import torch.autograd
import torch.nn as nn
from torch.nn.modules.module import Module
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907.
"""
def __init__(self, state_dim, name='', out_state_dim=None):
super(GraphConvolution, self).__init__()
self.state_dim = state_dim
if out_state_dim is None:
self.out_state_dim = state_dim
else:
self.out_state_dim = out_state_dim
        self.fc1 = nn.Linear(in_features=self.state_dim,
            out_features=self.out_state_dim)
        self.fc2 = nn.Linear(in_features=self.state_dim,
            out_features=self.out_state_dim)
self.name = name
def forward(self, input, adj):
state_in = self.fc1(input)
forward_input = self.fc2(torch.bmm(adj, input))
return state_in + forward_input
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.autograd
import torch.nn as nn
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_4, primals_3, out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](buf3, primals_2, buf2, primals_6,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf2
del primals_2
del primals_6
    return (buf3, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (16, 4), (4, 1), 0))
class GraphConvolutionNew(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907.
"""
def __init__(self, state_dim, name='', out_state_dim=None):
super(GraphConvolutionNew, self).__init__()
self.state_dim = state_dim
if out_state_dim is None:
self.out_state_dim = state_dim
else:
self.out_state_dim = out_state_dim
        self.fc1 = nn.Linear(in_features=self.state_dim,
            out_features=self.out_state_dim)
        self.fc2 = nn.Linear(in_features=self.state_dim,
            out_features=self.out_state_dim)
self.name = name
def forward(self, input_0, input_1):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| SowmyaAitha/Palmira | GraphConvolution | false | 17,940 | ["MIT"] | 6 | c3ae884e35b8b3703a5e4ba52d7b0bdae6da1bad | https://github.com/SowmyaAitha/Palmira/tree/c3ae884e35b8b3703a5e4ba52d7b0bdae6da1bad |
Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/g3/cg3ccnqa6wyzxanbi6fgd6vkyhgnnoo3va4nix3ohszhmyutqgep.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 73728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, None)
''', device_str='cuda')
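# Index arithmetic, spelled out (annotation added for this write-up): for a
# 4x4 map padded to 6x6 by ReflectionPad2d(1), the source coordinate of
# padded position p in [0, 5] is src(p) = 3 - abs(abs(p - 1) - 3), so the
# load offset above is src(x0) + 4 * src(x1) + 16 * x2, rewritten as
# 15 - abs(abs(x0 - 1) - 3) - 4 * abs(abs(x1 - 1) - 3) + 16 * x2.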
# kernel path: runs/run_shard_2/inductor_cache/ki/ckii6ld2pjui2kyh33ieyagpaxjso55jvdwg3kttj5omjlrhkys6.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_2 => add, add_1, convert_element_type, convert_element_type_1, iota_2, mul, mul_1
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {})
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_1 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
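# This index-table kernel computes src(i) = int(i * 0.5) = i // 2 for each of
# the 8 output positions: the row/column lookup of a 2x nearest-neighbour
# upsample from a 4x4 to an 8x8 map, consumed by the next kernel via in_ptr0.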
# kernel path: runs/run_shard_2/inductor_cache/iw/ciwouyfqxfi5suuzcagn3bbdbaulgyw4bpqnsdjlizmkc2oz2muq.py
# Topologically Sorted Source Nodes: [conv2d, x_1, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d]
# Source node to ATen node mapping:
# conv2d => convolution
# x_1 => relu
# x_2 => _unsafe_index_2
# x_3 => _unsafe_index_3, _unsafe_index_4
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, %sub_5, None]), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_3, [None, None, None, %sub_5]), kwargs = {})
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 102400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 10) % 10
x0 = xindex % 10
x4 = (xindex // 100)
x2 = (xindex // 100) % 256
x7 = xindex
tmp0 = tl.load(in_ptr0 + (7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1)))))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0)))))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (4*tmp4) + (16*x4)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x7), tmp13, None)
''', device_str='cuda')
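# With the index table above, this kernel fuses four graph ops into a single
# 10x10 write per channel: the conv bias add and ReLU on the 4x4 output, the
# 2x nearest-neighbour upsample to 8x8, and ReflectionPad2d(1) out to 10x10.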
# kernel path: runs/run_shard_2/inductor_cache/x6/cx6kpvwny3lwf4b75yjzmpojqlofwsrfkk23uf3lterlix2wg3ah.py
# Topologically Sorted Source Nodes: [conv2d_1, x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_4 => relu_1
# x_5 => _unsafe_index_5, _unsafe_index_6
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_4, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %sub_5, None]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_5, [None, None, None, %sub_5]), kwargs = {})
triton_poi_fused_convolution_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 102400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 10
x1 = (xindex // 10) % 10
x4 = (xindex // 100)
x2 = (xindex // 100) % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0))))) + ((-8)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1))))) + (64*x4)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x5), tmp4, None)
''', device_str='cuda')
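# Same fusion pattern without the upsample: bias add and ReLU on the 8x8 conv
# output, then ReflectionPad2d(1) out to 10x10 ahead of the next convolution.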
# kernel path: runs/run_shard_2/inductor_cache/od/codkvvxpur7vfcixyfgg6ayyzatao2cw5of6bxy53exotyuzofwb.py
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_11 => add_4, add_5, convert_element_type_4, convert_element_type_5, iota_12, mul_4, mul_5
# Graph fragment:
# %iota_12 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_12, 1), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0), kwargs = {})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.float32), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.5), kwargs = {})
# %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_4 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/u4/cu42vug3hzqlursadot6js3gerux56tz7joz5blmrj6a5xowpyd6.py
# Topologically Sorted Source Nodes: [conv2d_4, x_10, x_11, x_12], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_10 => relu_4
# x_11 => _unsafe_index_11
# x_12 => _unsafe_index_12, _unsafe_index_13
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_10, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_4, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {})
# %_unsafe_index_12 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_11, [None, None, %sub_21, None]), kwargs = {})
# %_unsafe_index_13 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_12, [None, None, None, %sub_21]), kwargs = {})
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 165888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 18) % 18
x0 = xindex % 18
x4 = (xindex // 324)
x2 = (xindex // 324) % 128
x7 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1)))))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0)))))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (8*tmp4) + (64*x4)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x7), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/lz/clznsh6fdhksacfsunvntibvsjaoxwmok2tqs5avvjtlb7uojj5w.py
# Topologically Sorted Source Nodes: [conv2d_5, x_13, x_14], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# conv2d_5 => convolution_5
# x_13 => relu_5
# x_14 => _unsafe_index_14, _unsafe_index_15
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_13, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %_unsafe_index_14 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_5, [None, None, %sub_21, None]), kwargs = {})
# %_unsafe_index_15 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_14, [None, None, None, %sub_21]), kwargs = {})
triton_poi_fused_convolution_reflection_pad2d_relu_6 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 165888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = (xindex // 18) % 18
x4 = (xindex // 324)
x2 = (xindex // 324) % 128
x5 = xindex
tmp0 = tl.load(in_ptr0 + (255 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0))))) + ((-16)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (256*x4)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x5), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6w/c6wyyvxovkkzfk6k64bfon5db5xfxiywvm7oionlfmjjhdvc6ugb.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_16 => add_8, add_9, convert_element_type_8, convert_element_type_9, iota_18, mul_8, mul_9
# Graph fragment:
# %iota_18 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_18, 1), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, 0), kwargs = {})
# %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_8, torch.float32), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.0), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_9, 0.5), kwargs = {})
# %convert_element_type_9 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_9, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_7 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
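# Editor's sketch (not Inductor output): the kernel above materializes the
# nearest-neighbor source indices for the 2x upsample from 16 to 32, i.e.
# int(x * 0.5) for every output coordinate x. A minimal eager equivalent,
# with an illustrative helper name:
def _sketch_nearest_upsample_indices(out_size=32, scale=0.5):
    import torch
    # Same arithmetic as tmp0..tmp4 above: float cast, scale, truncate.
    return (torch.arange(out_size, dtype=torch.float32) * scale).to(torch.int64)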
# kernel path: runs/run_shard_2/inductor_cache/74/c74eexru54jzdd7so47hr6vfjhuchik3ect6ywzjtozle2ccoanv.py
# Topologically Sorted Source Nodes: [conv2d_6, x_15, x_16, x_17], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d]
# Source node to ATen node mapping:
# conv2d_6 => convolution_6
# x_15 => relu_6
# x_16 => _unsafe_index_16
# x_17 => _unsafe_index_17, _unsafe_index_18
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_15, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
# %_unsafe_index_16 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_6, [None, None, %unsqueeze_2, %convert_element_type_9]), kwargs = {})
# %_unsafe_index_17 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_16, [None, None, %sub_29, None]), kwargs = {})
# %_unsafe_index_18 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_17, [None, None, None, %sub_29]), kwargs = {})
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 34) % 34
x0 = xindex % 34
x4 = (xindex // 1156)
x2 = (xindex // 1156) % 64
x7 = xindex
tmp0 = tl.load(in_ptr0 + (31 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1)))))), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (31 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0)))))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (16*tmp4) + (256*x4)), xmask, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x7), tmp13, xmask)
''', device_str='cuda')
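# Editor's sketch (not Inductor output): kernel 8 fuses bias add, ReLU, 2x
# nearest upsampling and ReflectionPad2d((1, 1, 1, 1)) into a single pass
# over the padded 34x34 output. A reference composition in eager PyTorch,
# with illustrative names:
def _sketch_relu_upsample_reflect(conv_out, bias):
    import torch.nn.functional as F
    x = F.relu(conv_out + bias.view(1, -1, 1, 1))         # convolution_6 bias + relu_6
    x = F.interpolate(x, scale_factor=2, mode='nearest')  # _unsafe_index_16
    return F.pad(x, (1, 1, 1, 1), mode='reflect')         # _unsafe_index_17/_unsafe_index_18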
# kernel path: runs/run_shard_2/inductor_cache/hz/chz63phj47p2gjm5xtlsmszcrnfpp4xsj7dae5glkrxk5zs7hyto.py
# Topologically Sorted Source Nodes: [conv2d_7, x_18, x_19], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# x_18 => relu_7
# x_19 => _unsafe_index_19, _unsafe_index_20
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_18, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
# %_unsafe_index_19 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_7, [None, None, %sub_29, None]), kwargs = {})
# %_unsafe_index_20 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_19, [None, None, None, %sub_29]), kwargs = {})
triton_poi_fused_convolution_reflection_pad2d_relu_9 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = (xindex // 34) % 34
x4 = (xindex // 1156)
x2 = (xindex // 1156) % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (1023 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + ((-32)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (1024*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x5), tmp4, xmask)
''', device_str='cuda')
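# Editor's sketch (not Inductor output): the fused load above indexes the
# 32x32 source with the closed form 31 - |31 - |x - 1||, which is exactly
# the coordinate map of ReflectionPad2d((1, 1, 1, 1)). A plain-Python check
# of that identity, with an illustrative helper name:
def _sketch_reflection_index(x, size=32, pad=1):
    # Maps a padded coordinate x in [0, size + 2 * pad) to a source
    # coordinate in [0, size); e.g. x = 0 -> 1 and x = 33 -> 30.
    return (size - 1) - abs((size - 1) - abs(x - pad))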
# kernel path: runs/run_shard_2/inductor_cache/hn/chnibi7gwebi7kg4g3szjbyckwlp74j2tqdbjdrt6nzxxpvjzwhu.py
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_20 => convolution_8
# Graph fragment:
# %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_20, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
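# Editor's sketch (not Inductor output): kernel 10 only folds the bias of
# the final 3-channel convolution into its output in place, i.e. a broadcast
# add over batch and spatial dims. Eager equivalent, names illustrative:
def _sketch_inplace_bias_add(out, bias):
    # out: (N, C, H, W), bias: (C,)
    return out.add_(bias.view(1, -1, 1, 1))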
# kernel path: runs/run_shard_2/inductor_cache/5o/c5ojoadwtfg74b52gh7y53jwhw2ktpi5q7bj2iprlawfjiexyy3r.py
# Topologically Sorted Source Nodes: [conv2d_7, x_18], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# x_18 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_18, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
# %le_18 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
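# Editor's sketch (not Inductor output): kernels 11 through 16 all precompute
# the boolean mask relu(conv + bias) <= 0 that aten.threshold_backward uses
# to zero gradients at inactive units during the ReLU backward pass. Eager
# equivalent, names illustrative:
def _sketch_relu_backward_mask(conv_out, bias):
    import torch.nn.functional as F
    return F.relu(conv_out + bias.view(1, -1, 1, 1)) <= 0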
# kernel path: runs/run_shard_2/inductor_cache/aa/caa4f5re2hltkmtlkj3n4yzdxyuaw5kf6spnc6g2phrpe6faiy4x.py
# Topologically Sorted Source Nodes: [conv2d_6, x_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_6 => convolution_6
# x_15 => relu_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_15, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
# %le_37 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_6, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_12 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/jr/cjrwyczsgjgyy6qosgum4wgnyb2w3ihpqvja6fteyhda2zxoh5ot.py
# Topologically Sorted Source Nodes: [conv2d_5, x_13], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_5 => convolution_5
# x_13 => relu_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_13, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %le_56 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_13 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/sz/cszsc7yevnmc6ugclb5yovyvzqbxqknvq6uzgneygczjmkvgtczc.py
# Topologically Sorted Source Nodes: [conv2d_4, x_10], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_10 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_10, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %le_75 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_14 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/rf/crftfxpfxphanidbjn5jncdcv65uaywlzsfsv5jzcsmdxwvjvf2c.py
# Topologically Sorted Source Nodes: [conv2d_3, x_8], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_8 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_8, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le_94 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_15 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ia/cianovbanealq7vsk3q7ox66wiuiaoeqjzpupak5lcrtiilg2ewl.py
# Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le_151 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_16 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args
args.clear()
assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1))
assert_size_stride(primals_2, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128, ), (1, ))
assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (64, ), (1, ))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64, ), (1, ))
assert_size_stride(primals_18, (3, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 73728, grid=grid(73728), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 4, 4), (4096, 16, 4, 1))
buf2 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_1.run(buf2, 8, grid=grid(8), stream=stream0)
buf3 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x_1, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d]
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2.run(buf2, buf1, primals_3, buf3, 102400, grid=grid(102400), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1))
buf5 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf4, primals_5, buf5, 102400, grid=grid(102400), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 8, 8), (16384, 64, 8, 1))
buf7 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, x_6, x_7], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf6, primals_7, buf7, 102400, grid=grid(102400), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 8, 8), (16384, 64, 8, 1))
buf9 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_3, x_8, x_9], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf8, primals_9, buf9, 102400, grid=grid(102400), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 8, 8), (8192, 64, 8, 1))
buf11 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_4.run(buf11, 16, grid=grid(16), stream=stream0)
buf12 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_4, x_10, x_11, x_12], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d]
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5.run(buf11, buf10, primals_11, buf12, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 16, 16), (32768, 256, 16, 1))
buf14 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_5, x_13, x_14], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_6.run(buf13, primals_13, buf14, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 16, 16), (16384, 256, 16, 1))
buf16 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_7.run(buf16, 32, grid=grid(32), stream=stream0)
buf17 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_6, x_15, x_16, x_17], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d]
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8.run(buf16, buf15, primals_15, buf17, 295936, grid=grid(295936), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf19 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_7, x_18, x_19], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_9.run(buf18, primals_17, buf19, 295936, grid=grid(295936), stream=stream0)
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 3, 32, 32), (3072, 1024, 32, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution]
triton_poi_fused_convolution_10.run(buf21, primals_19, 12288, grid=grid(12288), stream=stream0)
del primals_19
buf22 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_7, x_18], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_11.run(buf18, primals_17, buf22, 262144, grid=grid(262144), stream=stream0)
del buf18
del primals_17
buf23 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_6, x_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_12.run(buf15, primals_15, buf23, 65536, grid=grid(65536), stream=stream0)
del buf15
del primals_15
buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_5, x_13], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_13.run(buf13, primals_13, buf24, 131072, grid=grid(131072), stream=stream0)
del buf13
del primals_13
buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_4, x_10], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_14.run(buf10, primals_11, buf25, 32768, grid=grid(32768), stream=stream0)
del buf10
del primals_11
buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, x_8], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_15.run(buf8, primals_9, buf26, 65536, grid=grid(65536), stream=stream0)
del buf8
del primals_9
buf27 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, x_6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_15.run(buf6, primals_7, buf27, 65536, grid=grid(65536), stream=stream0)
del buf6
del primals_7
buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, x_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_15.run(buf4, primals_5, buf28, 65536, grid=grid(65536), stream=stream0)
del buf4
del primals_5
buf29 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_16.run(buf1, primals_3, buf29, 16384, grid=grid(16384), stream=stream0)
del buf1
del primals_3
return (buf21, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, buf0, buf2, buf3, buf5, buf7, buf9, buf11, buf12, buf14, buf16, buf17, buf19, buf22, buf23, buf24, buf25, buf26, buf27, buf28, buf29, )
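# Editor's note (not Inductor output): `call` returns the decoder output
# (buf21) first, followed by the weights, padded activations and ReLU masks
# saved for the backward pass. A hedged sketch of pulling just the forward
# result out, with illustrative names; note `call` clears the list it is given:
def _sketch_forward_only(params):
    outputs = call(list(params))
    return outputs[0]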
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((3, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.pre11 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv11 = nn.Conv2d(in_channels=512, out_channels=256,
kernel_size=3, stride=1)
self.relu11 = nn.ReLU(inplace=True)
self.up1 = nn.Upsample(scale_factor=2, mode='nearest')
self.pre21 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv21 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1)
self.relu21 = nn.ReLU(inplace=True)
self.pre22 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv22 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1)
self.relu22 = nn.ReLU(inplace=True)
self.pre23 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv23 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1)
self.relu23 = nn.ReLU(inplace=True)
self.pre24 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv24 = nn.Conv2d(in_channels=256, out_channels=128,
kernel_size=3, stride=1)
self.relu24 = nn.ReLU(inplace=True)
self.up2 = nn.Upsample(scale_factor=2, mode='nearest')
self.pre31 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv31 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1)
self.relu31 = nn.ReLU(inplace=True)
self.pre32 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv32 = nn.Conv2d(in_channels=128, out_channels=64,
kernel_size=3, stride=1)
self.relu32 = nn.ReLU(inplace=True)
self.up3 = nn.Upsample(scale_factor=2, mode='nearest')
self.pre41 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv41 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1)
self.relu41 = nn.ReLU(inplace=True)
self.pre42 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv42 = nn.Conv2d(in_channels=64, out_channels=3, kernel_size
=3, stride=1)
self.relu42 = nn.ReLU(inplace=True)
def forward(self, x):
x = self.pre11(x)
x = self.relu11(self.conv11(x))
x = self.up1(x)
x = self.pre21(x)
x = self.relu21(self.conv21(x))
x = self.pre22(x)
x = self.relu22(self.conv22(x))
x = self.pre23(x)
x = self.relu23(self.conv23(x))
x = self.pre24(x)
x = self.relu24(self.conv24(x))
x = self.up2(x)
x = self.pre31(x)
x = self.relu31(self.conv31(x))
x = self.pre32(x)
x = self.relu32(self.conv32(x))
x = self.up3(x)
x = self.pre41(x)
x = self.relu41(self.conv41(x))
x = self.pre42(x)
x = self.conv42(x)
return x
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return [[], {}]
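# Editor's sketch (not part of the original module): a quick shape check.
# Three nearest 2x upsamples take the 4x4 input to 32x32 and conv42 reduces
# 64 channels to 3, so the decoder maps (4, 512, 4, 4) -> (4, 3, 32, 32).
def _sketch_decoder_shapes():
    model = Decoder()
    out = model(get_inputs()[0])
    assert out.shape == (4, 3, 32, 32)
    return out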
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), None,
eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 10 % 10
x0 = xindex % 10
x4 = xindex // 100
x2 = xindex // 100 % 256
x7 = xindex
tmp0 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1
))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0
))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 10
x1 = xindex // 10 % 10
x4 = xindex // 100
x2 = xindex // 100 % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 +
x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 18 % 18
x0 = xindex % 18
x4 = xindex // 324
x2 = xindex // 324 % 128
x7 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x1))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = xindex // 18 % 18
x4 = xindex // 324
x2 = xindex // 324 % 128
x5 = xindex
tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4),
None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 34 % 34
x0 = xindex % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 64
x7 = xindex
tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x1))), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x4), xmask,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, xmask)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = xindex // 34 % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_13(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1))
assert_size_stride(primals_2, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (3, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1),
            torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(73728)](primals_1, buf0,
73728, XBLOCK=512, num_warps=8, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 4, 4), (4096, 16, 4, 1))
buf2 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf2, 8, XBLOCK
=8, num_warps=1, num_stages=1)
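        # buf2 maps each of the 8 upsampled positions back to its nearest-neighbour
        # source index for the scale-factor-2 upsample consumed by the next kernel.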
buf3 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2[grid
(102400)](buf2, buf1, primals_3, buf3, 102400, XBLOCK=512,
num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1))
buf5 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf4
, primals_5, buf5, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 8, 8), (16384, 64, 8, 1))
buf7 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf6
, primals_7, buf7, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 8, 8), (16384, 64, 8, 1))
buf9 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf8
, primals_9, buf9, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 8, 8), (8192, 64, 8, 1))
buf11 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_4[grid(16)](buf11, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5[grid
(165888)](buf11, buf10, primals_11, buf12, 165888, XBLOCK=512,
num_warps=8, num_stages=1)
buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 16, 16), (32768, 256, 16, 1))
buf14 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(165888)](
buf13, primals_13, buf14, 165888, XBLOCK=1024, num_warps=4,
num_stages=1)
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 16, 16), (16384, 256, 16, 1))
buf16 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_7[grid(32)](buf16, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8[grid
(295936)](buf16, buf15, primals_15, buf17, 295936, XBLOCK=512,
num_warps=8, num_stages=1)
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf19 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_9[grid(295936)](
buf18, primals_17, buf19, 295936, XBLOCK=1024, num_warps=4,
num_stages=1)
buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 3, 32, 32), (3072, 1024, 32, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_10[grid(12288)](buf21, primals_19,
12288, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf22 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_11[grid(262144)](
buf18, primals_17, buf22, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf18
del primals_17
buf23 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_12[grid(65536)](
buf15, primals_15, buf23, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf15
del primals_15
buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_13[grid(131072)](
buf13, primals_13, buf24, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf13
del primals_13
buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_14[grid(32768)](
buf10, primals_11, buf25, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del buf10
del primals_11
buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf8, primals_9, buf26, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf8
del primals_9
buf27 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf6, primals_7, buf27, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf6
del primals_7
buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf4, primals_5, buf28, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf4
del primals_5
buf29 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_16[grid(16384)](
buf1, primals_3, buf29, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del primals_3
return (buf21, primals_2, primals_4, primals_6, primals_8, primals_10,
primals_12, primals_14, primals_16, primals_18, buf0, buf2, buf3,
buf5, buf7, buf9, buf11, buf12, buf14, buf16, buf17, buf19, buf22,
buf23, buf24, buf25, buf26, buf27, buf28, buf29)
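# call() returns the decoded image (buf21) together with the inputs, activations and
# ReLU masks needed for backward; DecoderNew re-exposes the original module
# interface on top of this compiled graph.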
class DecoderNew(nn.Module):
def __init__(self):
super(DecoderNew, self).__init__()
self.pre11 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv11 = nn.Conv2d(in_channels=512, out_channels=256,
kernel_size=3, stride=1)
self.relu11 = nn.ReLU(inplace=True)
self.up1 = nn.Upsample(scale_factor=2, mode='nearest')
self.pre21 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv21 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1)
self.relu21 = nn.ReLU(inplace=True)
self.pre22 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv22 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1)
self.relu22 = nn.ReLU(inplace=True)
self.pre23 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv23 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1)
self.relu23 = nn.ReLU(inplace=True)
self.pre24 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv24 = nn.Conv2d(in_channels=256, out_channels=128,
kernel_size=3, stride=1)
self.relu24 = nn.ReLU(inplace=True)
self.up2 = nn.Upsample(scale_factor=2, mode='nearest')
self.pre31 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv31 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1)
self.relu31 = nn.ReLU(inplace=True)
self.pre32 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv32 = nn.Conv2d(in_channels=128, out_channels=64,
kernel_size=3, stride=1)
self.relu32 = nn.ReLU(inplace=True)
self.up3 = nn.Upsample(scale_factor=2, mode='nearest')
self.pre41 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv41 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1)
self.relu41 = nn.ReLU(inplace=True)
self.pre42 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv42 = nn.Conv2d(in_channels=64, out_channels=3, kernel_size
=3, stride=1)
self.relu42 = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_2 = self.conv11.weight
primals_3 = self.conv11.bias
primals_4 = self.conv21.weight
primals_5 = self.conv21.bias
primals_6 = self.conv22.weight
primals_7 = self.conv22.bias
primals_8 = self.conv23.weight
primals_9 = self.conv23.bias
primals_10 = self.conv24.weight
primals_11 = self.conv24.bias
primals_12 = self.conv31.weight
primals_13 = self.conv31.bias
primals_14 = self.conv32.weight
primals_15 = self.conv32.bias
primals_16 = self.conv41.weight
primals_17 = self.conv41.bias
primals_18 = self.conv42.weight
primals_19 = self.conv42.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0]
| ShiZhuming/StyleTransfer | Decoder | false | 17,941 | [
"MIT"
] | 10 | cba2a3ceb733a2d129d52d4a3cac07c7651bd928 | https://github.com/ShiZhuming/StyleTransfer/tree/cba2a3ceb733a2d129d52d4a3cac07c7651bd928 |
SH2Signal | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/c3/cc3bgood72cihjvnjciwgk4utiuai5bquinmgvrn2dutbovyrepi.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# y => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 256
xnumel = 15
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 64
y1 = (yindex // 64)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (64*x2) + (960*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (15*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ab/cabixjc5sn7ztuqw4eauuw7ag46puqi6zxdgujidt3relr5hrop3.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (256*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (64*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1, 15, 4, 4, 4), (960, 960, 64, 16, 4, 1))
assert_size_stride(arg1_1, (15, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 4, 15), (960, 960, 240, 60, 15, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 256, 15, grid=grid(256, 15), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.bmm]
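        # arg1_1 is viewed with batch stride 0, broadcasting the single (15, 4)
        # SH matrix across all 64 bmm batches.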
extern_kernels.bmm(reinterpret_tensor(buf0, (64, 4, 15), (60, 15, 1), 0), reinterpret_tensor(arg1_1, (64, 15, 4), (0, 4, 1), 0), out=buf1)
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, 1, 4, 4, 4, 4), (256, 256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, buf2, 16, 64, grid=grid(16, 64), stream=stream0)
del buf1
return (reinterpret_tensor(buf2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 1, 15, 4, 4, 4), (960, 960, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((15, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
from scipy import special as sci
def cart2sph(x, y, z):
"""
    cart2sph(x, y, z) -> azimuthal_angle, polar_angle, radial_distance
    Computes the spherical coordinates corresponding to the given input parameters :attr:`x`, :attr:`y` and :attr:`z`.
Args:
x (Number): x position
y (Number): y position
z (Number): z position
Example::
>>> cart2sph(1, 1, 1)
(0.78539816339744828, 0.95531661812450919, 1.7320508075688772)
"""
azimuthal_angle = np.arctan2(y, x)
radial_distance = np.sqrt(x ** 2 + y ** 2 + z ** 2)
polar_angle = np.arccos(z / radial_distance)
return azimuthal_angle, polar_angle, radial_distance
class SH2Signal(nn.Module):
"""
SH2Signal(dwi_sh) -> dwi
Computes the corresponding dwi signal for each gradient
Args:
x_in (5D tensor): input spherical harmonic tensor
x_in.size(): (Batchsize x Number of shells*Number of coefficients x DimX x DimY x DimZ)
y (5D tensor): corresponding dwi tensor
y.size(): (Batchsize x Number of shells * Number of gradients x DimX x DimY x DimZ)
"""
def __init__(self, sh_order, gradients):
super(SH2Signal, self).__init__()
self.sh_order = sh_order
self.num_gradients = gradients.shape[0]
self.num_coefficients = int((self.sh_order + 1) * (self.sh_order /
2 + 1))
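        # number of even-order SH coefficients up to order L is (L + 1) * (L / 2 + 1);
        # for sh_order=4 this gives 5 * 3 = 15, matching the (15, num_gradients) matrix.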
SH2SignalMat = np.zeros((self.num_coefficients, self.num_gradients))
for id_gradient in range(self.num_gradients):
id_coefficient = 0
for id_order in range(0, self.sh_order + 1, 2):
for id_degree in range(-id_order, id_order + 1):
gradients_phi, gradients_theta, _gradients_z = cart2sph(
gradients[id_gradient, 0], gradients[id_gradient, 1
], gradients[id_gradient, 2])
y = sci.sph_harm(np.abs(id_degree), id_order,
gradients_phi, gradients_theta)
if id_degree < 0:
SH2SignalMat[id_coefficient, id_gradient] = np.real(y
) * np.sqrt(2)
elif id_degree == 0:
SH2SignalMat[id_coefficient, id_gradient] = np.real(y)
elif id_degree > 0:
SH2SignalMat[id_coefficient, id_gradient] = np.imag(y
) * np.sqrt(2)
id_coefficient += 1
self.SH2SignalMat = torch.nn.Parameter(torch.from_numpy(
SH2SignalMat).float(), requires_grad=False)
def forward(self, x_in):
x_dim = x_in.size()
x = x_in.reshape((x_dim[0], np.ceil(x_in.size(1) / self.
num_coefficients).astype(int), self.num_coefficients, x_dim[-3],
x_dim[-2], x_dim[-1]))
x = x.permute(0, 1, 3, 4, 5, 2)
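        # with the coefficient axis last, (..., 15) @ (15, num_gradients) projects each
        # voxel's SH coefficients onto the gradient directions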
y = x.matmul(self.SH2SignalMat)
y = y.permute(0, 1, 5, 2, 3, 4).contiguous().reshape((x_dim[0], -1,
x_dim[-3], x_dim[-2], x_dim[-1]))
return y
def get_inputs():
return [torch.rand([4, 1, 15, 4, 4, 4])]
def get_init_inputs():
return [[], {'sh_order': 4, 'gradients': torch.rand([4, 4])}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
from scipy import special as sci
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
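    # relayout: move the 15-coefficient axis to stride 1 so the subsequent bmm
    # reduces over it contiguously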
ynumel = 256
xnumel = 15
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 64
y1 = yindex // 64
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 64 * x2 + 960 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 15 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 256 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 64 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1, 15, 4, 4, 4), (960, 960, 64, 16, 4, 1))
assert_size_stride(arg1_1, (15, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 4, 15), (960, 960, 240, 60,
15, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256, 15)](arg0_1, buf0, 256, 15,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (64, 4, 15), (60, 15, 1
), 0), reinterpret_tensor(arg1_1, (64, 15, 4), (0, 4, 1), 0),
out=buf1)
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, 1, 4, 4, 4, 4), (256, 256, 64, 16, 4,
1), torch.float32)
triton_poi_fused_clone_1[grid(16, 64)](buf1, buf2, 16, 64, XBLOCK=
64, YBLOCK=16, num_warps=4, num_stages=1)
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0),
def cart2sph(x, y, z):
"""
    cart2sph(x, y, z) -> azimuthal_angle, polar_angle, radial_distance
    Computes the spherical coordinates corresponding to the given input parameters :attr:`x`, :attr:`y` and :attr:`z`.
Args:
x (Number): x position
y (Number): y position
z (Number): z position
Example::
>>> cart2sph(1, 1, 1)
(0.78539816339744828, 0.95531661812450919, 1.7320508075688772)
"""
azimuthal_angle = np.arctan2(y, x)
radial_distance = np.sqrt(x ** 2 + y ** 2 + z ** 2)
polar_angle = np.arccos(z / radial_distance)
return azimuthal_angle, polar_angle, radial_distance
class SH2SignalNew(nn.Module):
"""
SH2Signal(dwi_sh) -> dwi
Computes the corresponding dwi signal for each gradient
Args:
x_in (5D tensor): input spherical harmonic tensor
x_in.size(): (Batchsize x Number of shells*Number of coefficients x DimX x DimY x DimZ)
y (5D tensor): corresponding dwi tensor
y.size(): (Batchsize x Number of shells * Number of gradients x DimX x DimY x DimZ)
"""
def __init__(self, sh_order, gradients):
super(SH2SignalNew, self).__init__()
self.sh_order = sh_order
self.num_gradients = gradients.shape[0]
self.num_coefficients = int((self.sh_order + 1) * (self.sh_order /
2 + 1))
SH2SignalMat = np.zeros((self.num_coefficients, self.num_gradients))
for id_gradient in range(self.num_gradients):
id_coefficient = 0
for id_order in range(0, self.sh_order + 1, 2):
for id_degree in range(-id_order, id_order + 1):
gradients_phi, gradients_theta, _gradients_z = cart2sph(
gradients[id_gradient, 0], gradients[id_gradient, 1
], gradients[id_gradient, 2])
y = sci.sph_harm(np.abs(id_degree), id_order,
gradients_phi, gradients_theta)
if id_degree < 0:
SH2SignalMat[id_coefficient, id_gradient] = np.real(y
) * np.sqrt(2)
elif id_degree == 0:
SH2SignalMat[id_coefficient, id_gradient] = np.real(y)
elif id_degree > 0:
SH2SignalMat[id_coefficient, id_gradient] = np.imag(y
) * np.sqrt(2)
id_coefficient += 1
self.SH2SignalMat = torch.nn.Parameter(torch.from_numpy(
SH2SignalMat).float(), requires_grad=False)
def forward(self, input_0):
arg1_1 = self.SH2SignalMat
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
| SimonKoppers/DELIMIT | SH2Signal | false | 17,942 | [
"MIT"
] | 7 | d778a567bbec1beef2395ead60aa1e30086bb07c | https://github.com/SimonKoppers/DELIMIT/tree/d778a567bbec1beef2395ead60aa1e30086bb07c |
GraphResConvolution | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/yj/cyj7sixcwajn3iyu7vszggabfljk2rflmyjhrelhfiph26itcvxc.py
# Topologically Sorted Source Nodes: [output_1, output_1_relu], Original ATen: [aten.add, aten.relu]
# Source node to ATen node mapping:
# output_1 => add
# output_1_relu => relu
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
triton_poi_fused_add_relu_0 = async_compile.triton('triton_poi_fused_add_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
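    # tmp0/tmp1: fc1 matmul output and bias; tmp3/tmp4: fc2(bmm(adj, x)) output and
    # bias. The kernel fuses both bias adds, the GraphConvolution sum and the ReLU.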
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/zl/czldhuomfekq3z7cdqt45bss3w7nomdnts2clgqh6s6vnbmeiia5.py
# Topologically Sorted Source Nodes: [output_2, output_2_res, output], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# output => relu_1
# output_2 => add_1
# output_2_res => add_2
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_7), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_3), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_2,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x2), xmask)
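    # in_ptr3 carries the block's residual input; tmp12 below records (output <= 0)
    # for the ReLU backward pass.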
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(in_out_ptr0 + (x2), tmp10, xmask)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_4, primals_3, out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [output_1, output_1_relu], Original ATen: [aten.add, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_add_relu_0.run(buf3, primals_2, buf2, primals_6, 64, grid=grid(64), stream=stream0)
del primals_2
del primals_6
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_4, buf3, out=buf5)
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf6)
buf7 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [output_2, output_2_res, output], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_relu_threshold_backward_1.run(buf7, primals_8, buf6, primals_10, primals_3, buf8, 64, grid=grid(64), stream=stream0)
del buf6
del primals_10
del primals_8
return (buf7, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (16, 4), (4, 1), 0), buf3, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), buf8, primals_9, reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0), primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import torch.autograd
import torch.nn as nn
from torch.nn.modules.module import Module
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907.
"""
def __init__(self, state_dim, name='', out_state_dim=None):
super(GraphConvolution, self).__init__()
self.state_dim = state_dim
if out_state_dim is None:
self.out_state_dim = state_dim
else:
self.out_state_dim = out_state_dim
self.fc1 = nn.Linear(in_features=self.state_dim, out_features=self.
out_state_dim)
self.fc2 = nn.Linear(in_features=self.state_dim, out_features=self.
out_state_dim)
self.name = name
def forward(self, input, adj):
state_in = self.fc1(input)
forward_input = self.fc2(torch.bmm(adj, input))
return state_in + forward_input
class GraphResConvolution(Module):
"""
    Residual GCN block: two GraphConvolution layers (https://arxiv.org/abs/1609.02907) with a skip connection.
"""
def __init__(self, state_dim, name=''):
super(GraphResConvolution, self).__init__()
self.state_dim = state_dim
self.gcn_1 = GraphConvolution(state_dim, f'{name}_1')
self.gcn_2 = GraphConvolution(state_dim, f'{name}_2')
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.name = name
def forward(self, input, adj):
output_1 = self.gcn_1(input, adj)
output_1_relu = self.relu1(output_1)
output_2 = self.gcn_2(output_1_relu, adj)
output_2_res = output_2 + input
output = self.relu2(output_2_res)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import torch.autograd
import torch.nn as nn
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tl.store(in_out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(in_out_ptr0 + x2, tmp10, xmask)
tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_4, primals_3, out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_relu_0[grid(64)](buf3, primals_2, buf2,
primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
del primals_6
buf4 = buf2
del buf2
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_4, buf3, out=buf5)
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf6)
buf7 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(64)](buf7,
primals_8, buf6, primals_10, primals_3, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf6
del primals_10
del primals_8
return buf7, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(buf1, (16, 4), (4, 1), 0
), buf3, reinterpret_tensor(buf5, (16, 4), (4, 1), 0
), buf8, primals_9, reinterpret_tensor(primals_4, (4, 4, 4), (16, 1,
4), 0), primals_7
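# alongside the block output (buf7), call() also returns intermediate activations and
# the ReLU mask (buf8) saved for the backward pass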
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907.
"""
def __init__(self, state_dim, name='', out_state_dim=None):
super(GraphConvolution, self).__init__()
self.state_dim = state_dim
if out_state_dim is None:
self.out_state_dim = state_dim
else:
self.out_state_dim = out_state_dim
self.fc1 = nn.Linear(in_features=self.state_dim, out_features=self.
out_state_dim)
self.fc2 = nn.Linear(in_features=self.state_dim, out_features=self.
out_state_dim)
self.name = name
def forward(self, input, adj):
state_in = self.fc1(input)
forward_input = self.fc2(torch.bmm(adj, input))
return state_in + forward_input
class GraphResConvolutionNew(Module):
"""
    Residual GCN block: two GraphConvolution layers (https://arxiv.org/abs/1609.02907) with a skip connection.
"""
def __init__(self, state_dim, name=''):
super(GraphResConvolutionNew, self).__init__()
self.state_dim = state_dim
self.gcn_1 = GraphConvolution(state_dim, f'{name}_1')
self.gcn_2 = GraphConvolution(state_dim, f'{name}_2')
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.name = name
def forward(self, input_0, input_1):
primals_1 = self.gcn_1.fc1.weight
primals_2 = self.gcn_1.fc1.bias
primals_5 = self.gcn_1.fc2.weight
primals_6 = self.gcn_1.fc2.bias
primals_7 = self.gcn_2.fc1.weight
primals_8 = self.gcn_2.fc1.bias
primals_9 = self.gcn_2.fc2.weight
primals_10 = self.gcn_2.fc2.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| SowmyaAitha/Palmira | GraphResConvolution | false | 17,943 | [
"MIT"
] | 6 | c3ae884e35b8b3703a5e4ba52d7b0bdae6da1bad | https://github.com/SowmyaAitha/Palmira/tree/c3ae884e35b8b3703a5e4ba52d7b0bdae6da1bad |
SharedDropoutMLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/5m/c5mqjkgr5v5727r267mbxh7dsgbrwetftfoe6333a3eri2wcmk42.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SharedDropout(nn.Module):
"""
SharedDropout differs from the vanilla dropout strategy in that
the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
Reference:
- https://github.com/yzhangcs/parser/blob/main/supar/modules/dropout.py
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return f'{self.__class__.__name__}({s})'
def forward(self, x):
"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x = x * mask
return x
@staticmethod
def get_mask(x, p):
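        # inverted dropout: keep each element with probability 1 - p and rescale by
        # 1 / (1 - p) so the mask has an expected value of 1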
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
class SharedDropoutMLP(nn.Module):
"""
Applies a linear transformation together with a non-linear activation to the incoming tensor:
:math:`y = \\mathrm{Activation}(x A^T + b)`
Args:
n_in (~torch.Tensor):
The size of each input feature.
n_out (~torch.Tensor):
The size of each output feature.
dropout (float):
If non-zero, introduce a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
activation (bool):
Whether to use activations. Default: True.
"""
def __init__(self, n_in, n_out, dropout=0, activation=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.LeakyReLU(negative_slope=0.1
) if activation else nn.Identity()
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, x):
"""
Args:
x (~torch.Tensor):
The size of each input feature is `n_in`.
Returns:
A tensor with the size of each output feature `n_out`.
"""
x = self.linear(x)
x = self.activation(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
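    # out_ptr0 stores the (pre-activation > 0) mask reused in LeakyReLU backward;
    # out_ptr1 stores the LeakyReLU output with negative slope 0.1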
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class SharedDropout(nn.Module):
"""
SharedDropout differs from the vanilla dropout strategy in that
the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
Reference:
- https://github.com/yzhangcs/parser/blob/main/supar/modules/dropout.py
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return f'{self.__class__.__name__}({s})'
def forward(self, x):
"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x = x * mask
return x
@staticmethod
def get_mask(x, p):
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
class SharedDropoutMLPNew(nn.Module):
"""
Applies a linear transformation together with a non-linear activation to the incoming tensor:
:math:`y = \\mathrm{Activation}(x A^T + b)`
Args:
n_in (~torch.Tensor):
The size of each input feature.
n_out (~torch.Tensor):
The size of each output feature.
dropout (float):
If non-zero, introduce a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
activation (bool):
Whether to use activations. Default: True.
"""
def __init__(self, n_in, n_out, dropout=0, activation=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.LeakyReLU(negative_slope=0.1
) if activation else nn.Identity()
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
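# Minimal usage sketch (assumes a CUDA device: call() launches the Triton
# kernel on device 0 and asserts a contiguous (4, 4, 4, 4) float32 input):
def _demo_shared_dropout_mlp():
    if not torch.cuda.is_available():
        return
    mlp = SharedDropoutMLPNew(n_in=4, n_out=4).cuda()
    out = mlp(torch.rand(4, 4, 4, 4, device='cuda'))
    assert out.shape == (4, 4, 4, 4)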
| Spico197/REx | SharedDropoutMLP | false | 17,944 | [
"MIT"
] | 4 | bb3cdb845765a63e9bd18070068af52a1b2db3f3 | https://github.com/Spico197/REx/tree/bb3cdb845765a63e9bd18070068af52a1b2db3f3 |
SubjObjSpan | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/jy/cjyo6wznwyxjcbgbrtzowafvqjuf4wurgzubrlevuj4frs7o2i2v.py
# Topologically Sorted Source Nodes: [subj_head], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# subj_head => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = (xindex // 256)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/cv/ccvhrqgy6up7hx57qfk5mtwrcuea272slaim2a6wpmvygvxopiyu.py
# Topologically Sorted Source Nodes: [subj_head], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# subj_head => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yu/cyuauoh4e7xdriyx4r4emjbvagsdb5i2fdrbi7vszycpbnfpp4wc.py
# Topologically Sorted Source Nodes: [add, sub, encoded_text], Original ATen: [aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# encoded_text => add_1
# sub => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, %view_9), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %div), kwargs = {})
triton_poi_fused_add_div_2 = async_compile.triton('triton_poi_fused_add_div_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tmp1 + tmp2
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [subj_head_out], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [subj_tail_out], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_4
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [subj_head], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_6, buf4, 1024, grid=grid(1024), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [subj_head], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(primals_3, buf5, 1024, grid=grid(1024), stream=stream0)
buf6 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [subj_head], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (64, 4, 4), (16, 4, 1), 0), out=buf6)
buf7 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [subj_tail], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(primals_7, buf7, 1024, grid=grid(1024), stream=stream0)
del primals_7
buf8 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [subj_tail], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (64, 4, 4), (16, 4, 1), 0), out=buf8)
del buf5
buf9 = reinterpret_tensor(buf6, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [add, sub, encoded_text], Original ATen: [aten.add, aten.div]
triton_poi_fused_add_div_2.run(buf9, primals_3, buf8, 1024, grid=grid(1024), stream=stream0)
buf10 = reinterpret_tensor(buf8, (256, 4), (4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [pred_obj_heads], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf9, (256, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_8
del primals_9
buf11 = reinterpret_tensor(buf7, (256, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [pred_obj_tails], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf9, (256, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11)
del primals_10
del primals_11
return (reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf11, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf9, (256, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from typing import Iterable
from typing import Optional
import torch.nn as nn
def find_closest_span_pairs(head: 'Iterable', tail: 'Iterable', backtrace:
'Optional[bool]'=True):
"""
Find all span pairs.
Args:
head: list of start position predictions, either 1 or 0
tail: list of end position predictions, either 1 or 0
backtrace: if there are more tail predictions than head predictions,
            then backtrace to find the closest head position to get a span pair
Examples:
>>> head = torch.tensor([1, 0, 0, 1, 0, 0, 1], dtype=torch.long)
>>> tail = torch.tensor([0, 1, 0, 1, 0, 1, 1], dtype=torch.long)
>>> find_closest_span_pairs(head, tail, backtrace=False)
[(0, 1), (3, 3), (6, 6)]
>>> find_closest_span_pairs(head, tail, backtrace=True)
[(0, 1), (3, 3), (6, 6), (3, 5)]
"""
if isinstance(head, torch.Tensor):
head = head.detach().cpu()
if isinstance(tail, torch.Tensor):
tail = tail.detach().cpu()
head_valid_poses = np.where(head == 1)[0]
tail_valid_poses = np.where(tail == 1)[0]
    tail_used_poses = {pos: False for pos in tail_valid_poses.tolist()}
pairs = []
for head_i in head_valid_poses:
tail_js = tail_valid_poses[tail_valid_poses >= head_i]
if len(tail_js) > 0:
tail_j = tail_js[0]
tail_used_poses[tail_j] = True
pairs.append((head_i, tail_j))
if backtrace:
for tail_j in tail_used_poses:
if tail_used_poses[tail_j] is False:
head_is = head_valid_poses[head_valid_poses <= tail_j]
if len(head_is) > 0:
head_i = head_is[-1]
pairs.append((head_i, tail_j))
return pairs
def find_closest_span_pairs_with_index(heads: 'Iterable', tails: 'Iterable',
backtrace: 'Optional[bool]'=True):
"""
    Find all possible span pairs together with their batch indexes,
    which is useful for object discovery with class indexes.
Args:
heads: batch of torch.Tensor
tails: batch of torch.Tensor
backtrace: if there are more tail predictions than head predictions,
            then backtrace to find the closest head position to get a span pair
Examples:
>>> heads = torch.tensor([[1, 0, 0, 1, 0, 0, 1], [1, 0, 0, 1, 0, 0, 1]], dtype=torch.long)
>>> tails = torch.tensor([[0, 1, 0, 1, 0, 1, 1], [0, 1, 0, 0, 0, 1, 0]], dtype=torch.long)
    >>> find_closest_span_pairs_with_index(heads, tails, backtrace=False)
[(0, 0, 1), (0, 3, 3), (0, 6, 6), (1, 0, 1), (1, 3, 5)]
    >>> find_closest_span_pairs_with_index(heads, tails, backtrace=True)
[(0, 0, 1), (0, 3, 3), (0, 6, 6), (0, 3, 5), (1, 0, 1), (1, 3, 5)]
"""
results = []
for idx, (head, tail) in enumerate(zip(heads, tails)):
pairs = find_closest_span_pairs(head, tail, backtrace=backtrace)
for pair in pairs:
results.append((idx, pair[0], pair[1]))
return results
class SubjObjSpan(nn.Module):
"""
Inputs:
hidden: (batch_size, seq_len, hidden_size)
one_subj_head: object golden head with one subject (batch_size, hidden_size)
one_subj_tail: object golden tail with one subject (batch_size, hidden_size)
"""
def __init__(self, hidden_size, num_classes, threshold:
'Optional[float]'=0.5):
super().__init__()
self.threshold = threshold
self.subj_head_ffnn = nn.Linear(hidden_size, 1)
self.subj_tail_ffnn = nn.Linear(hidden_size, 1)
self.obj_head_ffnn = nn.Linear(hidden_size, num_classes)
self.obj_tail_ffnn = nn.Linear(hidden_size, num_classes)
def get_objs_for_specific_subj(self, subj_head_mapping,
subj_tail_mapping, hidden):
subj_head = torch.matmul(subj_head_mapping, hidden)
subj_tail = torch.matmul(subj_tail_mapping, hidden)
sub = (subj_head + subj_tail) / 2
encoded_text = hidden + sub
pred_obj_heads = self.obj_head_ffnn(encoded_text)
pred_obj_tails = self.obj_tail_ffnn(encoded_text)
return pred_obj_heads, pred_obj_tails
def build_mapping(self, subj_heads, subj_tails):
"""
        Build head & tail mappings for predicted subjects.
        For each instance in a batch, yield one predicted subject at a
        time together with its head and tail mapping vectors.
"""
for subj_head, subj_tail in zip(subj_heads, subj_tails):
subjs = find_closest_span_pairs(subj_head, subj_tail)
seq_len = subj_head.shape[0]
for subj in subjs:
subj_head_mapping = torch.zeros(seq_len, device=subj_head.
device)
subj_tail_mapping = torch.zeros(seq_len, device=subj_tail.
device)
subj_head_mapping[subj[0]] = 1.0
subj_tail_mapping[subj[1]] = 1.0
yield subj, subj_head_mapping, subj_tail_mapping
def build_batch_mapping(self, subj_head, subj_tail):
"""
        Build head & tail mappings for predicted subjects.
        For a single instance, return all predicted subjects together
        with their stacked head and tail mapping matrices.
"""
subjs = find_closest_span_pairs(subj_head, subj_tail)
seq_len = subj_head.shape[0]
if len(subjs) > 0:
subjs_head_mapping = torch.zeros(len(subjs), seq_len, device=
subj_head.device)
subjs_tail_mapping = torch.zeros(len(subjs), seq_len, device=
subj_tail.device)
for subj_idx, subj in enumerate(subjs):
subjs_head_mapping[subj_idx, subj[0]] = 1.0
subjs_tail_mapping[subj_idx, subj[1]] = 1.0
return subjs, subjs_head_mapping, subjs_tail_mapping
else:
return None, None, None
def forward(self, hidden, subj_head, subj_tail):
subj_head_out = self.subj_head_ffnn(hidden)
subj_tail_out = self.subj_tail_ffnn(hidden)
obj_head_out, obj_tail_out = self.get_objs_for_specific_subj(subj_head
.unsqueeze(1), subj_tail.unsqueeze(1), hidden)
return subj_head_out.squeeze(-1), subj_tail_out.squeeze(-1
), obj_head_out, obj_tail_out
def predict(self, hidden):
if hidden.shape[0] != 1:
raise RuntimeError(
f'eval batch size must be 1 x hidden_size, while hidden is {hidden.shape}'
)
subj_head_out = self.subj_head_ffnn(hidden)
subj_tail_out = self.subj_tail_ffnn(hidden)
subj_head_out = torch.sigmoid(subj_head_out)
subj_tail_out = torch.sigmoid(subj_tail_out)
pred_subj_head = subj_head_out.ge(self.threshold).long()
pred_subj_tail = subj_tail_out.ge(self.threshold).long()
triples = []
subjs, subj_head_mappings, subj_tail_mappings = (self.
build_batch_mapping(pred_subj_head.squeeze(0).squeeze(-1),
pred_subj_tail.squeeze(0).squeeze(-1)))
if subjs:
obj_head_out, obj_tail_out = self.get_objs_for_specific_subj(
subj_head_mappings.unsqueeze(1), subj_tail_mappings.
unsqueeze(1), hidden)
obj_head_out = torch.sigmoid(obj_head_out)
obj_tail_out = torch.sigmoid(obj_tail_out)
obj_head_out = obj_head_out.ge(self.threshold).long()
obj_tail_out = obj_tail_out.ge(self.threshold).long()
for subj_idx, subj in enumerate(subjs):
objs = find_closest_span_pairs_with_index(obj_head_out[
subj_idx].permute(1, 0), obj_tail_out[subj_idx].permute
(1, 0))
for relation_idx, obj_pair_start, obj_pair_end in objs:
triples.append(((subj[0], subj[1] + 1), relation_idx, (
obj_pair_start, obj_pair_end + 1)))
return [triples]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'num_classes': 4}]
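# Quick CPU shape check (an added sketch reusing the factory helpers above):
def _demo_subj_obj_span():
    model = SubjObjSpan(hidden_size=4, num_classes=4)
    hidden, subj_head, subj_tail = get_inputs()
    subj_h, subj_t, obj_h, obj_t = model(hidden, subj_head, subj_tail)
    # subject logits drop the trailing singleton dim; object logits keep
    # num_classes on the last axis after the broadcasted matmul
    return subj_h.shape, subj_t.shape, obj_h.shape, obj_t.shape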
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
from typing import Iterable
from typing import Optional
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex // 256
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_div_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tmp1 + tmp2
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
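# Reference semantics of the fused kernel above (sketch): with x0 indexing a
# 256-element tile of `hidden` broadcast across the expanded subject axis x2,
# it computes encoded_text = hidden + (subj_head + subj_tail) / 2 in place on
# the buffer that held subj_head. A plain-PyTorch equivalent:
def _reference_add_div(hidden, subj_head, subj_tail):
    return hidden + (subj_head + subj_tail) / 2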
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf3)
del primals_4
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(1024)](primals_6, buf4, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_clone_1[grid(1024)](primals_3, buf5, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf5, (64, 4, 4), (16, 4, 1), 0), out=buf6)
buf7 = buf4
del buf4
triton_poi_fused_clone_0[grid(1024)](primals_7, buf7, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf5, (64, 4, 4), (16, 4, 1), 0), out=buf8)
del buf5
buf9 = reinterpret_tensor(buf6, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
)
del buf6
triton_poi_fused_add_div_2[grid(1024)](buf9, primals_3, buf8, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (256, 4), (4, 1), 0)
del buf8
extern_kernels.addmm(primals_9, reinterpret_tensor(buf9, (256, 4),
(4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf10)
del primals_8
del primals_9
buf11 = reinterpret_tensor(buf7, (256, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_11, reinterpret_tensor(buf9, (256, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_10
del primals_11
return reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf10, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
), reinterpret_tensor(buf11, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf9, (256, 4), (4, 1), 0)
def find_closest_span_pairs(head: 'Iterable', tail: 'Iterable', backtrace:
'Optional[bool]'=True):
"""
Find all span pairs.
Args:
head: list of start position predictions, either 1 or 0
tail: list of end position predictions, either 1 or 0
backtrace: if there are more tail predictions than head predictions,
            then backtrace to find the closest head position to get a span pair
Examples:
>>> head = torch.tensor([1, 0, 0, 1, 0, 0, 1], dtype=torch.long)
>>> tail = torch.tensor([0, 1, 0, 1, 0, 1, 1], dtype=torch.long)
>>> find_closest_span_pairs(head, tail, backtrace=False)
[(0, 1), (3, 3), (6, 6)]
>>> find_closest_span_pairs(head, tail, backtrace=True)
[(0, 1), (3, 3), (6, 6), (3, 5)]
"""
if isinstance(head, torch.Tensor):
head = head.detach().cpu()
if isinstance(tail, torch.Tensor):
tail = tail.detach().cpu()
head_valid_poses = np.where(head == 1)[0]
tail_valid_poses = np.where(tail == 1)[0]
    tail_used_poses = {pos: False for pos in tail_valid_poses.tolist()}
pairs = []
for head_i in head_valid_poses:
tail_js = tail_valid_poses[tail_valid_poses >= head_i]
if len(tail_js) > 0:
tail_j = tail_js[0]
tail_used_poses[tail_j] = True
pairs.append((head_i, tail_j))
if backtrace:
for tail_j in tail_used_poses:
if tail_used_poses[tail_j] is False:
head_is = head_valid_poses[head_valid_poses <= tail_j]
if len(head_is) > 0:
head_i = head_is[-1]
pairs.append((head_i, tail_j))
return pairs
def find_closest_span_pairs_with_index(heads: 'Iterable', tails: 'Iterable',
backtrace: 'Optional[bool]'=True):
"""
    Find all possible span pairs together with their batch indexes,
    which is useful for object discovery with class indexes.
Args:
heads: batch of torch.Tensor
tails: batch of torch.Tensor
backtrace: if there are more tail predictions than head predictions,
            then backtrace to find the closest head position to get a span pair
Examples:
>>> heads = torch.tensor([[1, 0, 0, 1, 0, 0, 1], [1, 0, 0, 1, 0, 0, 1]], dtype=torch.long)
>>> tails = torch.tensor([[0, 1, 0, 1, 0, 1, 1], [0, 1, 0, 0, 0, 1, 0]], dtype=torch.long)
    >>> find_closest_span_pairs_with_index(heads, tails, backtrace=False)
[(0, 0, 1), (0, 3, 3), (0, 6, 6), (1, 0, 1), (1, 3, 5)]
    >>> find_closest_span_pairs_with_index(heads, tails, backtrace=True)
[(0, 0, 1), (0, 3, 3), (0, 6, 6), (0, 3, 5), (1, 0, 1), (1, 3, 5)]
"""
results = []
for idx, (head, tail) in enumerate(zip(heads, tails)):
pairs = find_closest_span_pairs(head, tail, backtrace=backtrace)
for pair in pairs:
results.append((idx, pair[0], pair[1]))
return results
class SubjObjSpanNew(nn.Module):
"""
Inputs:
hidden: (batch_size, seq_len, hidden_size)
one_subj_head: object golden head with one subject (batch_size, hidden_size)
one_subj_tail: object golden tail with one subject (batch_size, hidden_size)
"""
def __init__(self, hidden_size, num_classes, threshold:
'Optional[float]'=0.5):
super().__init__()
self.threshold = threshold
self.subj_head_ffnn = nn.Linear(hidden_size, 1)
self.subj_tail_ffnn = nn.Linear(hidden_size, 1)
self.obj_head_ffnn = nn.Linear(hidden_size, num_classes)
self.obj_tail_ffnn = nn.Linear(hidden_size, num_classes)
def get_objs_for_specific_subj(self, subj_head_mapping,
subj_tail_mapping, hidden):
subj_head = torch.matmul(subj_head_mapping, hidden)
subj_tail = torch.matmul(subj_tail_mapping, hidden)
sub = (subj_head + subj_tail) / 2
encoded_text = hidden + sub
pred_obj_heads = self.obj_head_ffnn(encoded_text)
pred_obj_tails = self.obj_tail_ffnn(encoded_text)
return pred_obj_heads, pred_obj_tails
def build_mapping(self, subj_heads, subj_tails):
"""
        Build head & tail mappings for predicted subjects.
        For each instance in a batch, yield one predicted subject at a
        time together with its head and tail mapping vectors.
"""
for subj_head, subj_tail in zip(subj_heads, subj_tails):
subjs = find_closest_span_pairs(subj_head, subj_tail)
seq_len = subj_head.shape[0]
for subj in subjs:
subj_head_mapping = torch.zeros(seq_len, device=subj_head.
device)
subj_tail_mapping = torch.zeros(seq_len, device=subj_tail.
device)
subj_head_mapping[subj[0]] = 1.0
subj_tail_mapping[subj[1]] = 1.0
yield subj, subj_head_mapping, subj_tail_mapping
def build_batch_mapping(self, subj_head, subj_tail):
"""
        Build head & tail mappings for predicted subjects.
        For a single instance, return all predicted subjects together
        with their stacked head and tail mapping matrices.
"""
subjs = find_closest_span_pairs(subj_head, subj_tail)
seq_len = subj_head.shape[0]
if len(subjs) > 0:
subjs_head_mapping = torch.zeros(len(subjs), seq_len, device=
subj_head.device)
subjs_tail_mapping = torch.zeros(len(subjs), seq_len, device=
subj_tail.device)
for subj_idx, subj in enumerate(subjs):
subjs_head_mapping[subj_idx, subj[0]] = 1.0
subjs_tail_mapping[subj_idx, subj[1]] = 1.0
return subjs, subjs_head_mapping, subjs_tail_mapping
else:
return None, None, None
def predict(self, hidden):
if hidden.shape[0] != 1:
raise RuntimeError(
f'eval batch size must be 1 x hidden_size, while hidden is {hidden.shape}'
)
subj_head_out = self.subj_head_ffnn(hidden)
subj_tail_out = self.subj_tail_ffnn(hidden)
subj_head_out = torch.sigmoid(subj_head_out)
subj_tail_out = torch.sigmoid(subj_tail_out)
pred_subj_head = subj_head_out.ge(self.threshold).long()
pred_subj_tail = subj_tail_out.ge(self.threshold).long()
triples = []
subjs, subj_head_mappings, subj_tail_mappings = (self.
build_batch_mapping(pred_subj_head.squeeze(0).squeeze(-1),
pred_subj_tail.squeeze(0).squeeze(-1)))
if subjs:
obj_head_out, obj_tail_out = self.get_objs_for_specific_subj(
subj_head_mappings.unsqueeze(1), subj_tail_mappings.
unsqueeze(1), hidden)
obj_head_out = torch.sigmoid(obj_head_out)
obj_tail_out = torch.sigmoid(obj_tail_out)
obj_head_out = obj_head_out.ge(self.threshold).long()
obj_tail_out = obj_tail_out.ge(self.threshold).long()
for subj_idx, subj in enumerate(subjs):
objs = find_closest_span_pairs_with_index(obj_head_out[
subj_idx].permute(1, 0), obj_tail_out[subj_idx].permute
(1, 0))
for relation_idx, obj_pair_start, obj_pair_end in objs:
triples.append(((subj[0], subj[1] + 1), relation_idx, (
obj_pair_start, obj_pair_end + 1)))
return [triples]
def forward(self, input_0, input_1, input_2):
primals_1 = self.subj_head_ffnn.weight
primals_2 = self.subj_head_ffnn.bias
primals_4 = self.subj_tail_ffnn.weight
primals_5 = self.subj_tail_ffnn.bias
primals_8 = self.obj_head_ffnn.weight
primals_9 = self.obj_head_ffnn.bias
primals_10 = self.obj_tail_ffnn.weight
primals_11 = self.obj_tail_ffnn.bias
primals_3 = input_0
primals_6 = input_1
primals_7 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1], output[2], output[3]
| Spico197/REx | SubjObjSpan | false | 17,945 | [
"MIT"
] | 4 | bb3cdb845765a63e9bd18070068af52a1b2db3f3 | https://github.com/Spico197/REx/tree/bb3cdb845765a63e9bd18070068af52a1b2db3f3 |
make_dense | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/zk/czk5od6c6offvbney2befszd7v5tyhkxi77vmdn7eszfbs2x6yjf.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %where], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = 0.0
tmp11 = tmp9 > tmp10
tmp12 = 0.1
tmp13 = tmp9 * tmp12
tmp14 = tl.where(tmp11, tmp9, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp6, tmp14, tmp15)
tmp17 = tl.where(tmp4, tmp5, tmp16)
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/fr/cfrjnzgf3i45hzj6pygth24amshdw5ecafvs3ysbg2b3yambrsig.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# out => gt, mul, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {})
triton_poi_fused_leaky_relu_leaky_relu_backward_1 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.1
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = tmp5 > tmp1
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, buf0, buf1, 512, grid=grid(512), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_leaky_relu_leaky_relu_backward_1.run(buf0, buf2, 256, grid=grid(256), stream=stream0)
del buf0
return (buf1, primals_1, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.model_zoo
class make_dense(nn.Module):
def __init__(self, channels_in, channels_out, kernel_size=3):
super(make_dense, self).__init__()
self.leaky_relu = nn.LeakyReLU(0.1, inplace=True)
self.conv = nn.Conv2d(channels_in, channels_out, kernel_size=
kernel_size, padding=(kernel_size - 1) // 2, bias=False)
def forward(self, x):
out = self.leaky_relu(self.conv(x))
out = torch.cat((x, out), 1)
return out
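# Channel-growth sketch (added example): stacking make_dense layers behaves
# like a DenseNet/RDN dense block, so the channel count grows additively
# with every concatenation.
def _demo_dense_growth():
    x = torch.rand(1, 4, 8, 8)
    block = nn.Sequential(make_dense(4, 4), make_dense(8, 4))
    y = block(x)
    assert y.shape == (1, 12, 8, 8)  # 4 input + 4 + 4 concatenated channels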
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels_in': 4, 'channels_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = 0.0
tmp11 = tmp9 > tmp10
tmp12 = 0.1
tmp13 = tmp9 * tmp12
tmp14 = tl.where(tmp11, tmp9, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp6, tmp14, tmp15)
tmp17 = tl.where(tmp4, tmp5, tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
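# Reference for the fused cat kernel above (sketch): output channels 0-3 pass
# x through unchanged, while channels 4-7 apply LeakyReLU(0.1) to the conv
# output during the concatenation, avoiding a separate activation pass:
def _reference_cat_leaky_relu(x, conv_out):
    import torch.nn.functional as F
    return torch.cat((x, F.leaky_relu(conv_out, negative_slope=0.1)), dim=1)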
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_1(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.1
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = tmp5 > tmp1
tl.store(out_ptr0 + x0, tmp6, xmask)
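# Note (sketch): the kernel above recomputes where LeakyReLU's output is
# positive; autograd keeps this boolean mask to route gradients in the
# backward pass, which is why buf2 below is allocated as torch.bool.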
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_2, buf0, buf1, 512,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_leaky_relu_backward_1[grid(256)](buf0,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
return buf1, primals_1, primals_2, buf2
class make_denseNew(nn.Module):
def __init__(self, channels_in, channels_out, kernel_size=3):
super(make_denseNew, self).__init__()
self.leaky_relu = nn.LeakyReLU(0.1, inplace=True)
self.conv = nn.Conv2d(channels_in, channels_out, kernel_size=
kernel_size, padding=(kernel_size - 1) // 2, bias=False)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| SeleSchaefer/super_resolution | make_dense | false | 17,946 | [
"MIT"
] | 5 | bf28a959fb150ceeadbd9f0bcfc12f3025cf82f4 | https://github.com/SeleSchaefer/super_resolution/tree/bf28a959fb150ceeadbd9f0bcfc12f3025cf82f4 |
CE_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/7u/c7ulwjup27sxyxjpogclhonelnizz2kva4gfu43pslpuuvhen63z.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((16*x1) + (64*(x0 // 16)) + (x0 % 16)), xmask)
tmp1 = tl.load(in_ptr0 + ((64*(x0 // 16)) + (x0 % 16)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + (64*(x0 // 16)) + (x0 % 16)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + (64*(x0 // 16)) + (x0 % 16)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + (64*(x0 // 16)) + (x0 % 16)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/wo/cwogsot7hgdaivsauedopds42pxlfo6doqwyo3izhh4xboeclhpu.py
# Topologically Sorted Source Nodes: [max_1, cross_entropy], Original ATen: [aten.max, aten.nll_loss_forward]
# Source node to ATen node mapping:
# cross_entropy => convert_element_type, div, full_default_1, ne_1, ne_2, neg, sum_2, sum_3, where_1
# max_1 => max_1
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view_1, 1), kwargs = {})
# %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%getitem_1, -100), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %neg, %full_default_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_1,), kwargs = {})
# %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%getitem_1, -100), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne_2,), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_2, torch.float32), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %convert_element_type), kwargs = {})
triton_per_fused_max_nll_loss_forward_1 = async_compile.triton('triton_per_fused_max_nll_loss_forward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_nll_loss_forward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_max_nll_loss_forward_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + ((64*(r0 // 16)) + (r0 % 16)), None)
tmp1 = tl.load(in_ptr0 + (16 + (64*(r0 // 16)) + (r0 % 16)), None)
tmp17 = tl.load(in_ptr0 + (32 + (64*(r0 // 16)) + (r0 % 16)), None)
tmp32 = tl.load(in_ptr0 + (48 + (64*(r0 // 16)) + (r0 % 16)), None)
tmp56 = tl.load(in_ptr1 + (r0), None)
tmp58 = tl.load(in_ptr1 + (64 + r0), None)
tmp61 = tl.load(in_ptr1 + (128 + r0), None)
tmp64 = tl.load(in_ptr1 + (192 + r0), None)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1, 1], 0, tl.int64)
tmp11 = tl.full([1, 1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1, 1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1, 1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tl.full([1, 1], -100, tl.int64)
tmp48 = tmp46 != tmp47
tmp49 = tl.where(tmp48, tmp46, tmp10)
tmp50 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp51 = tmp49 + tmp50
tmp52 = tmp49 < 0
tmp53 = tl.where(tmp52, tmp51, tmp49)
tl.device_assert((0 <= tmp53) & (tmp53 < 4), "index out of bounds: 0 <= tmp53 < 4")
tmp55 = tl.load(in_ptr1 + (r0 + (64*tmp53)), None)
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp55 - tmp67
tmp69 = -tmp68
tmp70 = 0.0
tmp71 = tl.where(tmp48, tmp69, tmp70)
tmp72 = tl.broadcast_to(tmp71, [XBLOCK, RBLOCK])
tmp74 = tl.sum(tmp72, 1)[:, None]
tmp75 = tmp48.to(tl.int64)
tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK])
tmp78 = tl.sum(tmp76, 1)[:, None]
tmp79 = tmp78.to(tl.float32)
tmp80 = tmp74 / tmp79
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp80, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 4), (1, 64), torch.float32)
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [max_1, cross_entropy], Original ATen: [aten.max, aten.nll_loss_forward]
triton_per_fused_max_nll_loss_forward_1.run(buf4, arg0_1, buf1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del buf1
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.model_zoo
class CE_loss(nn.Module):
def __init__(self):
super().__init__()
self.loss = nn.CrossEntropyLoss()
def forward(self, predict, target):
n, _c, h, w = target.data.shape
predict = predict.permute(0, 2, 3, 1).contiguous().view(n * h * w, -1)
target = target.permute(0, 2, 3, 1).contiguous().view(n * h * w, -1)
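        # torch.max(target, 1)[1] collapses the one-hot channel dimension into
        # hard class indices of shape (n*h*w,), as CrossEntropyLoss expects.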
return self.loss(predict, torch.max(target, 1)[1])
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16 * x1 + 64 * (x0 // 16) + x0 % 16), xmask)
tmp1 = tl.load(in_ptr0 + (64 * (x0 // 16) + x0 % 16), xmask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + 64 * (x0 // 16) + x0 % 16), xmask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + 64 * (x0 // 16) + x0 % 16), xmask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + 64 * (x0 // 16) + x0 % 16), xmask,
eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
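    # Only the row max is subtracted here; the log-sum-exp correction is
    # applied in the second kernel.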
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused_max_nll_loss_forward_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (64 * (r0 // 16) + r0 % 16), None)
tmp1 = tl.load(in_ptr0 + (16 + 64 * (r0 // 16) + r0 % 16), None)
tmp17 = tl.load(in_ptr0 + (32 + 64 * (r0 // 16) + r0 % 16), None)
tmp32 = tl.load(in_ptr0 + (48 + 64 * (r0 // 16) + r0 % 16), None)
tmp56 = tl.load(in_ptr1 + r0, None)
tmp58 = tl.load(in_ptr1 + (64 + r0), None)
tmp61 = tl.load(in_ptr1 + (128 + r0), None)
tmp64 = tl.load(in_ptr1 + (192 + r0), None)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1, 1], 0, tl.int64)
tmp11 = tl.full([1, 1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1, 1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1, 1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
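    # The selected max value is unused (its assignment was stripped as dead
    # code); only the argmax index tmp46 feeds the NLL gather below.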
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tl.full([1, 1], -100, tl.int64)
tmp48 = tmp46 != tmp47
tmp49 = tl.where(tmp48, tmp46, tmp10)
tmp50 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp51 = tmp49 + tmp50
tmp52 = tmp49 < 0
tmp53 = tl.where(tmp52, tmp51, tmp49)
tl.device_assert((0 <= tmp53) & (tmp53 < 4),
'index out of bounds: 0 <= tmp53 < 4')
tmp55 = tl.load(in_ptr1 + (r0 + 64 * tmp53), None)
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp55 - tmp67
tmp69 = -tmp68
tmp70 = 0.0
tmp71 = tl.where(tmp48, tmp69, tmp70)
tmp72 = tl.broadcast_to(tmp71, [XBLOCK, RBLOCK])
tmp74 = tl.sum(tmp72, 1)[:, None]
tmp75 = tmp48.to(tl.int64)
tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK])
tmp78 = tl.sum(tmp76, 1)[:, None]
tmp79 = tmp78.to(tl.float32)
tmp80 = tmp74 / tmp79
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp80, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 4), (1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2
del buf2
triton_per_fused_max_nll_loss_forward_1[grid(1)](buf4, arg0_1, buf1,
1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf1
return buf4,
class CE_lossNew(nn.Module):
def __init__(self):
super().__init__()
self.loss = nn.CrossEntropyLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| SeleSchaefer/super_resolution | CE_loss | false | 17,947 | [
"MIT"
] | 5 | bf28a959fb150ceeadbd9f0bcfc12f3025cf82f4 | https://github.com/SeleSchaefer/super_resolution/tree/bf28a959fb150ceeadbd9f0bcfc12f3025cf82f4 |
LogSTFTMagnitude | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/i3/ci3ormxqfwcgqbuv7gskbwxw6a4qlzze6sbb5iwt6qf5jahctpkm.py
# Topologically Sorted Source Nodes: [log_predicts_mag, log_targets_mag, outputs], Original ATen: [aten.log, aten.sub, aten.abs, aten.mean]
# Source node to ATen node mapping:
# log_predicts_mag => log
# log_targets_mag => log_1
# outputs => abs_1, mean, sub
# Graph fragment:
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log, %log_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
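# In eager terms this whole fusion is:
#   (predicts_mag.log() - targets_mag.log()).abs().mean()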
triton_per_fused_abs_log_mean_sub_0 = async_compile.triton('triton_per_fused_abs_log_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_log_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_log_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.log(tmp2)
tmp4 = tmp1 - tmp3
tmp5 = tl_math.abs(tmp4)
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [log_predicts_mag, log_targets_mag, outputs], Original ATen: [aten.log, aten.sub, aten.abs, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_log_mean_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class LogSTFTMagnitude(nn.Module):
def __init__(self):
super().__init__()
def forward(self, predicts_mag, targets_mag):
log_predicts_mag = torch.log(predicts_mag)
log_targets_mag = torch.log(targets_mag)
outputs = F.l1_loss(log_predicts_mag, log_targets_mag)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
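# Usage note (a sketch, not part of the original module): torch.log expects
# strictly positive magnitudes, so in practice STFT magnitudes should be
# clamped, e.g. spec.abs().clamp(min=1e-7), before this loss.
# loss = LogSTFTMagnitude()(torch.rand(4, 4, 4, 4) + 1e-7, torch.rand(4, 4, 4, 4) + 1e-7)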
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_log_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.log(tmp2)
tmp4 = tmp1 - tmp3
tmp5 = tl_math.abs(tmp4)
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
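    # Mean of |log(a) - log(b)| over all 256 (= 4*4*4*4) elements in one
    # persistent reduction.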
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_log_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class LogSTFTMagnitudeNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| SolomidHero/speech-regeneration-enhancer | LogSTFTMagnitude | false | 17,948 | [
"MIT"
] | 8 | eb43907ff085d68a707ff7bc3af14e93ff66fd65 | https://github.com/SolomidHero/speech-regeneration-enhancer/tree/eb43907ff085d68a707ff7bc3af14e93ff66fd65 |
Smoother | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/cx/ccxdntvgdmpeboix6jrnqty7sck5gd2vd77owidr2jlq3bhcxkmq.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%squeeze,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (12*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
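    # Adds in_proj_bias and de-interleaves the packed (seq*batch, 3*d_model)
    # Q/K/V projection into three contiguous d_model-wide chunks.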
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/b7/cb7drhznehu7kyo7a2rds6u5pp2h4fyaiej7npnvnba3lnirgdrn.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_2, 1.0), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
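    # Query scaling q * head_dim**-0.5; with d_model=4 and nhead=4 the head
    # dim is 1, so the scale is exactly 1.0.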
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/kz/ckzqylporms4fvgcrqg44ypprwpanp6hf222rji24wskr3b44aga.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
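    # Stable softmax, pass 1: exp(x - rowmax); the next kernel normalizes by
    # the row sum.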
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/4d/c4dndrlfjcamjfnn3ng5agjc3ahefdgw6jcsnn6hm4ljwpbfbe7h.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/pk/cpkj2gzrr3t7udq74jxyf6f5j2ecexhtn3ldfrrndgdckrxwrzkl.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yv/cyvu7b655f7w4y6fs3cr3d3vawpnn3vmcirao3tw5zgpuuobc2mb.py
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add
# src_1 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_7), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
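# That is: the attention residual add followed by LayerNorm statistics over
# d_model; with only 4 features the mean/variance reduction is unrolled
# pointwise in the kernel below.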
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
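    # tmp16 is the per-position mean, tmp28 the biased variance (correction=0)
    # over the 4 features.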
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/rp/crp6yznjcr5keantuusvl77ssv2xcxe4iqpzesafqd5zf32kmhfv.py
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add
# src_1 => add_1, add_2, mul_1, mul_2, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_7), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_6), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/kf/ckf2wkpio5gwsmq55gvz4qbn3w6g75h7okcfpct22y5jbxnnun2n.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute_9, %primals_8, %primals_9, [1], [4], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/kq/ckqyzns4h3qj6vlkbs3ixuhjdargnbofafgcx7hmose6wv77krjt.py
# Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv1d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute_9, %primals_8, %primals_9, [1], [4], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/lf/clfrocyhodyuhroclwkd75hcorq4jyer37vul2btnaggoz2sd6al.py
# Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# src_2 => add_3
# Graph fragment:
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %permute_11), kwargs = {})
triton_poi_fused_add_9 = async_compile.triton('triton_poi_fused_add_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x3 + (16*y0)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0 + (4*x3)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + (x3 + (16*y0)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/x5/cx5gtyeablavvxuctulbbmxt6iktkzzq7jji7e3b4efuwhs7j2eu.py
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src_3 => add_4, rsqrt_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_poi_fused_native_layer_norm_10 = async_compile.triton('triton_poi_fused_native_layer_norm_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vn/cvn6wpzho3qxzbnigol4pvjqtdlc2j4ikddxhcpnyvd73zs7v6ih.py
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src_3 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_12), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_13), kwargs = {})
triton_poi_fused_native_layer_norm_11 = async_compile.triton('triton_poi_fused_native_layer_norm_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, ), (1, ))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 9), (36, 9, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_2, buf1, 192, grid=grid(192), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((16, 4, 1), (1, 16, 64), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.bmm]
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (16, 1, 4), (1, 0, 16), 64), out=buf3)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
del buf4
buf6 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 0), 128), out=buf6)
buf7 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf6, buf7, 4, 16, grid=grid(4, 16), stream=stream0)
buf8 = reinterpret_tensor(buf6, (16, 4), (4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_5
buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_1, buf8, buf9, buf10, 16, grid=grid(16), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf8, buf9, buf10, primals_6, primals_7, buf11, 64, grid=grid(64), stream=stream0)
del primals_7
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_7.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_8, stride=(1,), padding=(4,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4), (16, 4, 1))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf14, primals_9, 64, grid=grid(64), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [src2_1], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, primals_10, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf15, (4, 4, 4), (16, 4, 1))
buf16 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add]
triton_poi_fused_add_9.run(buf11, buf15, primals_11, buf16, 4, 16, grid=grid(4, 16), stream=stream0)
del primals_11
buf17 = buf9; del buf9 # reuse
buf18 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_10.run(buf16, buf17, buf18, 16, grid=grid(16), stream=stream0)
buf19 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_11.run(buf16, buf17, buf18, primals_12, primals_13, buf19, 64, grid=grid(64), stream=stream0)
del buf17
del buf18
del primals_13
return (buf19, primals_1, primals_6, primals_8, primals_10, primals_12, buf5, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (4, 4, 4), (4, 1, 16), 0), buf14, buf16, primals_4, reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 128), reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 64), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 9), (36, 9, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch import Tensor
from typing import Optional
import torch.nn.functional as F
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Conv1d
from torch.nn import MultiheadAttention
class Smoother(Module):
"""Convolutional Transformer Encoder Layer"""
def __init__(self, d_model: 'int', nhead: 'int', d_hid: 'int', dropout=0.1
):
super(Smoother, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.conv1 = Conv1d(d_model, d_hid, 9, padding=4)
self.conv2 = Conv1d(d_hid, d_model, 1, padding=0)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
def forward(self, src: 'Tensor', src_mask: 'Optional[Tensor]'=None,
src_key_padding_mask: 'Optional[Tensor]'=None) ->Tensor:
src2 = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = src.transpose(0, 1).transpose(1, 2)
src2 = self.conv2(F.relu(self.conv1(src2)))
src2 = src2.transpose(1, 2).transpose(0, 1)
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
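# A minimal usage sketch (hand-written, not part of the module itself). It
# assumes the sequence-first (L, N, E) layout that nn.MultiheadAttention
# uses by default, which the transposes in forward() rely on.
def example_smoother_usage():
    layer = Smoother(d_model=4, nhead=4, d_hid=4, dropout=0.1)
    layer.eval()  # disable dropout so repeated calls agree
    src = torch.rand(4, 4, 4)  # (seq_len, batch, d_model)
    with torch.no_grad():
        out = layer(src)
    assert out.shape == src.shape  # the layer is shape-preserving
    return out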
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4, 'd_hid': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn import Module
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Conv1d
from torch.nn import MultiheadAttention
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
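# Hand-written reference sketch (not generated code): clone_0 adds the packed
# in-projection bias and regroups the (16, 12) matmul output into a
# contiguous (3, 16, 4) q/k/v buffer; element [q_k_v, row, dim] comes from
# column 4 * q_k_v + dim of the matmul result.
def qkv_pack_reference(proj, in_proj_bias):
    # proj: (16, 12) = src.view(16, 4) @ in_proj_weight.T; in_proj_bias: (12,)
    return (proj + in_proj_bias).view(16, 3, 4).permute(1, 0, 2).contiguous()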
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
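# Note (hand-written): the multiply by 1.0 above is not dead code in general;
# it is the attention scale q * head_dim ** -0.5, which degenerates to 1.0
# here because d_model=4 with nhead=4 gives head_dim=1.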
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
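# Hand-written reference sketch (not generated code): the two kernels above
# split a numerically stable softmax over rows of length 4 into a
# max-subtract/exp pass and a sum/divide pass. A plain PyTorch equivalent
# over the same (16, 4, 4) score tensor:
def softmax_reference(scores):
    shifted = scores - scores.amax(dim=-1, keepdim=True)  # pass 1: exp of shifted
    exp = shifted.exp()
    return exp / exp.sum(dim=-1, keepdim=True)            # pass 2: normalize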
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
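# Hand-written reference sketch (not generated code): kernels 5 and 6
# together compute norm1(src + attn_out). Kernel 5 emits the per-row mean and
# biased variance of the residual sum; kernel 6 re-reads both inputs and
# applies (x - mean) * rsqrt(var + 1e-05) * weight + bias:
def residual_layernorm_reference(src, attn_out, weight, bias, eps=1e-05):
    x = src + attn_out
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)  # biased, like the /4.0 above
    return (x - mean) * torch.rsqrt(var + eps) * weight + bias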
@triton.jit
def triton_poi_fused_convolution_7(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
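# Hand-written reference sketch (not generated code): despite its name, this
# "convolution" kernel is only a layout change. It materializes the (L, N, E)
# activations as a contiguous (N, C=E, L) tensor so the Conv1d extern kernel
# can consume them -- the src.transpose(0, 1).transpose(1, 2) of the eager
# module:
def to_conv_layout_reference(x):
    return x.permute(1, 2, 0).contiguous()  # (L, N, E) -> (N, E, L)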
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
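# Hand-written reference sketch (not generated code): the convolution runs
# with bias=None, so this kernel folds the per-channel bias add and the ReLU
# into one in-place pass over the conv output:
def bias_relu_reference(conv_out, bias):
    # conv_out: (N, C, L); bias: (C,) broadcast over batch and length
    return torch.relu(conv_out + bias[None, :, None])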
@triton.jit
def triton_poi_fused_add_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x3 + 16 * y0), xmask & ymask, eviction_policy
='evict_last')
tmp1 = tl.load(in_ptr1 + (y0 + 4 * x3), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + (x3 + 16 * y0), tmp4, xmask & ymask)
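# Hand-written reference sketch (not generated code): this kernel adds
# conv2's per-channel bias, transposes the (N, C, L) result back to
# (L, N, E), and adds the residual in a single pass -- the
# src + dropout2(src2) step with dropout inactive:
def residual_from_conv_reference(src, conv2_out, bias):
    src2 = (conv2_out + bias[None, :, None]).permute(2, 0, 1)  # back to (L, N, E)
    return src + src2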
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
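# Hand-written reference sketch (not generated code): kernels 10 and 11 are
# the second layer norm. Unlike kernel 5, kernel 10 already stores
# rsqrt(var + 1e-05), so kernel 11 only subtracts, scales, and shifts:
def layernorm_reference(x, weight, bias, eps=1e-05):
    mean = x.mean(dim=-1, keepdim=True)
    inv_std = torch.rsqrt(x.var(dim=-1, unbiased=False, keepdim=True) + eps)
    return (x - mean) * inv_std * weight + bias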
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 9), (36, 9, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(192)](buf0, primals_2, buf1, 192,
XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((16, 4, 1), (1, 16, 64), torch.float32)
triton_poi_fused_mul_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (16, 1, 4), (1, 0,
16), 64), out=buf3)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf4
buf6 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf5, reinterpret_tensor(buf1, (16, 4, 1), (1,
16, 0), 128), out=buf6)
buf7 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(4, 16)](buf6, buf7, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf6, (16, 4), (4, 1), 0)
del buf6
extern_kernels.addmm(primals_5, reinterpret_tensor(buf7, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf8)
del primals_5
buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf8,
buf9, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf8,
buf9, buf10, primals_6, primals_7, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_convolution_7[grid(16, 4)](buf11, buf12, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf13 = extern_kernels.convolution(buf12, primals_8, stride=(1,),
padding=(4,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4), (16, 4, 1))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_8[grid(64)](buf14, primals_9, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf15 = extern_kernels.convolution(buf14, primals_10, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf15, (4, 4, 4), (16, 4, 1))
buf16 = buf12
del buf12
triton_poi_fused_add_9[grid(4, 16)](buf11, buf15, primals_11, buf16,
4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1)
del primals_11
buf17 = buf9
del buf9
buf18 = buf10
del buf10
triton_poi_fused_native_layer_norm_10[grid(16)](buf16, buf17, buf18,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf19 = buf15
del buf15
triton_poi_fused_native_layer_norm_11[grid(64)](buf16, buf17, buf18,
primals_12, primals_13, buf19, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf17
del buf18
del primals_13
return (buf19, primals_1, primals_6, primals_8, primals_10, primals_12,
buf5, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), buf8,
reinterpret_tensor(buf11, (4, 4, 4), (4, 1, 16), 0), buf14, buf16,
primals_4, reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 128),
reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0),
reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 64))
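# A minimal sketch of driving call() by hand (hand-written; the benchmark
# helper in the first listing does the same with rand_strided). call()
# consumes its argument list and returns the output plus tensors saved for
# backward, so index [0] is the (4, 4, 4) result. CUDA is required.
def example_call_usage():
    args = [torch.rand(4, 4, 4, device='cuda')]                  # primals_1
    args += [torch.rand(12, device='cuda'),                      # in_proj bias
             torch.rand(12, 4, device='cuda')]                   # in_proj weight
    args += [torch.rand(4, 4, device='cuda'),                    # out_proj weight
             torch.rand(4, device='cuda')]                       # out_proj bias
    args += [torch.rand(4, device='cuda') for _ in range(2)]     # norm1 w, b
    args += [torch.rand(4, 4, 9, device='cuda'),                 # conv1 weight
             torch.rand(4, device='cuda')]                       # conv1 bias
    args += [torch.rand(4, 4, 1, device='cuda'),                 # conv2 weight
             torch.rand(4, device='cuda')]                       # conv2 bias
    args += [torch.rand(4, device='cuda') for _ in range(2)]     # norm2 w, b
    return call(args)[0]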
class SmootherNew(Module):
"""Convolutional Transformer Encoder Layer"""
def __init__(self, d_model: 'int', nhead: 'int', d_hid: 'int', dropout=0.1
):
super(SmootherNew, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.conv1 = Conv1d(d_model, d_hid, 9, padding=4)
self.conv2 = Conv1d(d_hid, d_model, 1, padding=0)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
def forward(self, input_0):
primals_3 = self.self_attn.in_proj_weight
primals_2 = self.self_attn.in_proj_bias
primals_4 = self.self_attn.out_proj.weight
primals_5 = self.self_attn.out_proj.bias
        primals_6 = self.norm1.weight
        primals_7 = self.norm1.bias
        primals_8 = self.conv1.weight
        primals_9 = self.conv1.bias
        primals_10 = self.conv2.weight
        primals_11 = self.conv2.bias
primals_12 = self.norm2.weight
primals_13 = self.norm2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
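# A minimal equivalence sketch (hand-written, assuming the eager Smoother
# from the previous listing is in scope). With dropout disabled the compiled
# module should reproduce it, since both read the same parameter tensors;
# the input must live on cuda:0 because call() is CUDA-only.
def check_against_eager(src):
    eager = Smoother(d_model=4, nhead=4, d_hid=4).cuda().eval()
    compiled = SmootherNew(d_model=4, nhead=4, d_hid=4).cuda()
    compiled.load_state_dict(eager.state_dict())
    with torch.no_grad():
        expected = eager(src)
        actual = compiled(src)
    torch.testing.assert_close(actual, expected, rtol=1e-4, atol=1e-5)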
 | SolomidHero/FragmentVC-with-RAdam | Smoother | false | 17949 | [ "MIT" ] | 6 | a0ee884155a4e8f47d8950a35258e58987f6289e | https://github.com/SolomidHero/FragmentVC-with-RAdam/tree/a0ee884155a4e8f47d8950a35258e58987f6289e
Extractor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/cx/ccxdntvgdmpeboix6jrnqty7sck5gd2vd77owidr2jlq3bhcxkmq.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%squeeze,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (12*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/b7/cb7drhznehu7kyo7a2rds6u5pp2h4fyaiej7npnvnba3lnirgdrn.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_2, 1.0), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/kz/ckzqylporms4fvgcrqg44ypprwpanp6hf222rji24wskr3b44aga.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/4d/c4dndrlfjcamjfnn3ng5agjc3ahefdgw6jcsnn6hm4ljwpbfbe7h.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/pk/cpkj2gzrr3t7udq74jxyf6f5j2ecexhtn3ldfrrndgdckrxwrzkl.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yv/cyvu7b655f7w4y6fs3cr3d3vawpnn3vmcirao3tw5zgpuuobc2mb.py
# Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# tgt => add
# tgt_1 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_7), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/rp/crp6yznjcr5keantuusvl77ssv2xcxe4iqpzesafqd5zf32kmhfv.py
# Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# tgt => add
# tgt_1 => add_1, add_2, mul_1, mul_2, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_7), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_6), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/3a/c3ao6b4bta7yhcohsith3bc2vbncozvobnjzjrklhade4rxmrjoq.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward_1 => clone_3
# Graph fragment:
# %clone_3 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%squeeze_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (8*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
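# Hand-written reference sketch (not generated code): unlike clone_0 in the
# Smoother listing, this clone packs only the k and v projections
# (128 = 2 * 64 elements) for the cross-attention step, which is why it
# offsets into the second half of the packed in-projection bias (the
# `4 + x0` load). The bias add alone is:
def kv_bias_reference(memory_proj, in_proj_bias):
    # memory_proj: (seq*batch, 8) = memory projected by the k/v weight rows;
    # in_proj_bias rows 4:12 are the k and v biases of the packed 12-vector
    return memory_proj + in_proj_bias[4:12]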
# kernel path: runs/run_shard_2/inductor_cache/uz/cuzmy4w36lei7gxzliur3uvbtz2jbtl3gi5bgcqqvswpujeyrrl4.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward_1 => mul_3
# Graph fragment:
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_11, 1.0), kwargs = {})
triton_poi_fused_mul_8 = async_compile.triton('triton_poi_fused_mul_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x2 % 4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
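# Hand-written reference sketch (not generated code): this kernel fuses the
# query projection's bias add with the attention scaling; the factor is
# again 1.0 because head_dim = 4 / 4 = 1:
def scaled_query_reference(q_proj, q_bias, scale=1.0):
    return (q_proj + q_bias) * scale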
# kernel path: runs/run_shard_2/inductor_cache/pw/cpwpuk3iko4onldffptbkmuntjzpad656fn35kacaf23ene4ris7.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# multi_head_attention_forward_1 => mean_1
# Graph fragment:
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_19, [1]), kwargs = {})
triton_poi_fused_mean_9 = async_compile.triton('triton_poi_fused_mean_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
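# Hand-written reference sketch (not generated code): this kernel averages
# the per-head attention weights over the head dimension, matching the
# averaged map that MultiheadAttention returns with need_weights=True:
def head_mean_reference(attn_weights):
    # attn_weights: (batch, nhead, tgt_len, src_len)
    return attn_weights.mean(dim=1)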
# kernel path: runs/run_shard_2/inductor_cache/gq/cgqmzyp4qpyt47c7rayhctb3uo5esykmv4uvim3wfftupp4belks.py
# Topologically Sorted Source Nodes: [tgt_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# tgt_2 => add_3
# Graph fragment:
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_18), kwargs = {})
triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/s5/cs5tbas5imaqzo4yvz3zezxm7bd4jhg4i5nuoqfxmfzzfffv3tbn.py
# Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# tgt_3 => add_4, rsqrt_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_poi_fused_native_layer_norm_11 = async_compile.triton('triton_poi_fused_native_layer_norm_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6y/c6yvw2wds65dhzesixpshewv27ijhlwgsarjlhphy7wd2jfty2tt.py
# Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# tgt_3 => add_4, add_5, mul_4, mul_5, rsqrt_1, sub_3, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_7), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %rsqrt_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %primals_13), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_14), kwargs = {})
triton_poi_fused_native_layer_norm_12 = async_compile.triton('triton_poi_fused_native_layer_norm_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ue/cuezvw2v7fxgdugxr77dapvfdifkq7czb7qzylxei3mqasrnqoas.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute_18, %primals_15, %primals_16, [1], [4], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_13 = async_compile.triton('triton_poi_fused_convolution_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_13(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/zg/czghoqivnbn3u6csxzepcalhjf6mshuuc4qnqi3pymt3ftgbmcee.py
# Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv1d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute_18, %primals_15, %primals_16, [1], [4], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_14 = async_compile.triton('triton_poi_fused_convolution_relu_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
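# The kernel above fuses the per-channel bias add with the ReLU activation,
# in place on the convolution output. A hedged reference sketch (argument
# names are assumptions):
def _bias_relu_reference(conv_out, bias):
    # conv_out: (N, C, L); bias: (C,) broadcast over batch and length
    return torch.relu(conv_out + bias.view(1, -1, 1))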
# kernel path: runs/run_shard_2/inductor_cache/vd/cvde6jcyccplkmqfm7kamced5nuexr2yas7t4yc7odofgagmrqdj.py
# Topologically Sorted Source Nodes: [tgt_4], Original ATen: [aten.add]
# Source node to ATen node mapping:
# tgt_4 => add_6
# Graph fragment:
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %permute_20), kwargs = {})
triton_poi_fused_add_15 = async_compile.triton('triton_poi_fused_add_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x3 + (16*y0)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0 + (4*x3)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + (x3 + (16*y0)), tmp4, xmask & ymask)
''', device_str='cuda')
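# The kernel above fuses three steps of the conv branch: the per-channel bias
# add on the Conv1d output, the permute back to seq-first layout, and the
# residual add onto tgt. An illustrative reference under assumed shapes:
def _residual_conv_add_reference(tgt, conv_out, bias):
    # tgt: (seq, batch, d); conv_out: (batch, d, seq); bias: (d,)
    branch = (conv_out + bias.view(1, -1, 1)).permute(2, 0, 1)
    return tgt + branch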
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, ), (1, ))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (12, 4), (4, 1))
assert_size_stride(primals_10, (12, ), (1, ))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, 4, 9), (36, 9, 1))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_18, (4, ), (1, ))
assert_size_stride(primals_19, (4, ), (1, ))
assert_size_stride(primals_20, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_2, buf1, 192, grid=grid(192), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((16, 4, 1), (1, 16, 64), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.bmm]
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (16, 1, 4), (1, 0, 16), 64), out=buf3)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 0), 128), out=buf6)
buf7 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf6, buf7, 4, 16, grid=grid(4, 16), stream=stream0)
buf8 = reinterpret_tensor(buf6, (16, 4), (4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_5
buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_1, buf8, buf9, buf10, 16, grid=grid(16), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf8, buf9, buf10, primals_6, primals_7, buf11, 64, grid=grid(64), stream=stream0)
del primals_7
buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12)
buf13 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 8), (1, 4), 16), out=buf13)
buf14 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.clone]
triton_poi_fused_clone_7.run(buf13, primals_10, buf14, 128, grid=grid(128), stream=stream0)
del buf13
buf15 = reinterpret_tensor(buf12, (16, 4, 1), (1, 16, 64), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.mul]
triton_poi_fused_mul_8.run(buf15, primals_10, 64, grid=grid(64), stream=stream0)
del primals_10
buf16 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf15, reinterpret_tensor(buf14, (16, 1, 4), (1, 0, 16), 0), out=buf16)
buf17 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf16, buf17, 256, grid=grid(256), stream=stream0)
buf18 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf17, buf18, 256, grid=grid(256), stream=stream0)
del buf17
buf19 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf18, reinterpret_tensor(buf14, (16, 4, 1), (1, 16, 0), 64), out=buf19)
buf20 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf19, buf20, 4, 16, grid=grid(4, 16), stream=stream0)
buf21 = reinterpret_tensor(buf19, (16, 4), (4, 1), 0); del buf19 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf20, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf21)
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.mean]
triton_poi_fused_mean_9.run(buf18, buf22, 64, grid=grid(64), stream=stream0)
buf23 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0); del buf21 # reuse
# Topologically Sorted Source Nodes: [tgt_2], Original ATen: [aten.add]
triton_poi_fused_add_10.run(buf23, buf11, primals_12, 64, grid=grid(64), stream=stream0)
del primals_12
buf24 = buf9; del buf9 # reuse
buf25 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_11.run(buf23, buf24, buf25, 16, grid=grid(16), stream=stream0)
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_12.run(buf23, buf24, buf25, primals_13, primals_14, buf26, 64, grid=grid(64), stream=stream0)
del primals_14
buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf26, buf27, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_15, stride=(1,), padding=(4,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf28, (4, 4, 4), (16, 4, 1))
buf29 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_14.run(buf29, primals_16, 64, grid=grid(64), stream=stream0)
del primals_16
# Topologically Sorted Source Nodes: [tgt2_1], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, primals_17, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf30, (4, 4, 4), (16, 4, 1))
buf31 = buf27; del buf27 # reuse
# Topologically Sorted Source Nodes: [tgt_4], Original ATen: [aten.add]
triton_poi_fused_add_15.run(buf26, buf30, primals_18, buf31, 4, 16, grid=grid(4, 16), stream=stream0)
del primals_18
buf32 = buf25; del buf25 # reuse
buf33 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [tgt_5], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_11.run(buf31, buf32, buf33, 16, grid=grid(16), stream=stream0)
buf34 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [tgt_5], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_12.run(buf31, buf32, buf33, primals_19, primals_20, buf34, 64, grid=grid(64), stream=stream0)
del buf32
del buf33
del primals_20
return (buf34, buf22, primals_1, primals_6, primals_13, primals_15, primals_17, primals_19, buf5, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), buf18, reinterpret_tensor(buf20, (16, 4), (4, 1), 0), buf23, reinterpret_tensor(buf26, (4, 4, 4), (4, 1, 16), 0), buf29, buf31, primals_11, reinterpret_tensor(buf14, (16, 1, 4), (1, 1, 16), 64), reinterpret_tensor(buf15, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf14, (16, 4, 1), (1, 16, 1), 0), reinterpret_tensor(primals_9, (4, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 128), reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 64), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4, 9), (36, 9, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch import Tensor
from typing import Optional
from typing import Tuple
import torch.nn.functional as F
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Conv1d
from torch.nn import MultiheadAttention
class Extractor(Module):
"""Convolutional Transformer Decoder Layer"""
def __init__(self, d_model: 'int', nhead: 'int', d_hid: 'int', dropout=
0.1, no_residual=False):
super(Extractor, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.cross_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.conv1 = Conv1d(d_model, d_hid, 9, padding=4)
self.conv2 = Conv1d(d_hid, d_model, 1, padding=0)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.norm3 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.dropout3 = Dropout(dropout)
self.no_residual = no_residual
def forward(self, tgt: 'Tensor', memory: 'Tensor', tgt_mask:
'Optional[Tensor]'=None, memory_mask: 'Optional[Tensor]'=None,
tgt_key_padding_mask: 'Optional[Tensor]'=None,
memory_key_padding_mask: 'Optional[Tensor]'=None, memory_features:
'Optional[Tensor]'=None) ->Tuple[Tensor, Optional[Tensor]]:
tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2, attn = self.cross_attn(tgt, memory if memory_features is None
else memory_features, memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
if self.no_residual:
tgt = self.dropout2(tgt2)
else:
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = tgt.transpose(0, 1).transpose(1, 2)
tgt2 = self.conv2(F.relu(self.conv1(tgt2)))
tgt2 = tgt2.transpose(1, 2).transpose(0, 1)
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4, 'd_hid': 4}]
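# A small usage sketch (illustrative only): inputs follow get_inputs() above,
# i.e. (seq_len, batch, d_model) tensors, matching MultiheadAttention's
# default seq-first layout (batch_first=False).
def _example_usage():
    init_args, init_kwargs = get_init_inputs()
    model = Extractor(*init_args, **init_kwargs)
    model.eval()  # disable dropout for a deterministic forward pass
    tgt, memory = get_inputs()
    out, attn = model(tgt, memory)
    return out.shape, attn.shape  # both torch.Size([4, 4, 4]) here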
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn import Module
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Conv1d
from torch.nn import MultiheadAttention
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
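# The two kernels above implement a numerically stable softmax in two passes:
# _softmax_2 subtracts the row max and exponentiates, _softmax_3 normalizes by
# the row sum. A hedged single-call reference:
def _softmax_reference(scores):
    shifted = scores - scores.max(dim=-1, keepdim=True).values
    e = shifted.exp()
    return e / e.sum(dim=-1, keepdim=True)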
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
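# The kernel above computes the LayerNorm statistics of the residual sum
# (tgt + attention output): the per-row mean and the biased variance over the
# 4 features. Illustrative reference (argument names assumed):
def _mean_var_reference(a, b):
    s = a + b
    return s.mean(dim=-1), s.var(dim=-1, unbiased=False)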
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 8 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0 + 4 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2 % 4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_mean_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
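# The kernel above averages the per-head attention maps over the heads,
# producing the averaged attention weights that the module returns (matching
# MultiheadAttention's default head-averaged output). A hedged sketch under
# the (batch * nhead, tgt_len, src_len) layout used here:
def _head_mean_reference(attn, batch=4, nhead=4):
    t, s = attn.shape[-2], attn.shape[-1]
    return attn.view(batch, nhead, t, s).mean(dim=1)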
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_convolution_13(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
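# The copy kernel above materializes the layout change needed before the
# Conv1d call: (seq, batch, d) -> (batch, d, seq), made contiguous. Sketch
# (illustrative only):
def _to_conv_layout_reference(tgt):
    return tgt.transpose(0, 1).transpose(1, 2).contiguous()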
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x3 + 16 * y0), xmask & ymask, eviction_policy
='evict_last')
tmp1 = tl.load(in_ptr1 + (y0 + 4 * x3), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + (x3 + 16 * y0), tmp4, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (12, 4), (4, 1))
assert_size_stride(primals_10, (12,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4, 9), (36, 9, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4,), (1,))
assert_size_stride(primals_20, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(192)](buf0, primals_2, buf1, 192,
XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((16, 4, 1), (1, 16, 64), torch.float32)
triton_poi_fused_mul_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (16, 1, 4), (1, 0,
16), 64), out=buf3)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf5, reinterpret_tensor(buf1, (16, 4, 1), (1,
16, 0), 128), out=buf6)
buf7 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(4, 16)](buf6, buf7, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf6, (16, 4), (4, 1), 0)
del buf6
extern_kernels.addmm(primals_5, reinterpret_tensor(buf7, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf8)
del primals_5
buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf8,
buf9, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf8,
buf9, buf10, primals_6, primals_7, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12)
buf13 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_8, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 8), (1, 4), 16), out=buf13)
buf14 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_7[grid(128)](buf13, primals_10, buf14, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del buf13
buf15 = reinterpret_tensor(buf12, (16, 4, 1), (1, 16, 64), 0)
del buf12
triton_poi_fused_mul_8[grid(64)](buf15, primals_10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_10
buf16 = buf4
del buf4
extern_kernels.bmm(buf15, reinterpret_tensor(buf14, (16, 1, 4), (1,
0, 16), 0), out=buf16)
buf17 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf16, buf17, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf18 = buf16
del buf16
triton_poi_fused__softmax_3[grid(256)](buf17, buf18, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf17
buf19 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf18, reinterpret_tensor(buf14, (16, 4, 1), (1,
16, 0), 64), out=buf19)
buf20 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(4, 16)](buf19, buf20, 4, 16, XBLOCK=
16, YBLOCK=4, num_warps=1, num_stages=1)
buf21 = reinterpret_tensor(buf19, (16, 4), (4, 1), 0)
del buf19
extern_kernels.mm(reinterpret_tensor(buf20, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf21)
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mean_9[grid(64)](buf18, buf22, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf23 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0)
del buf21
triton_poi_fused_add_10[grid(64)](buf23, buf11, primals_12, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
buf24 = buf9
del buf9
buf25 = buf10
del buf10
triton_poi_fused_native_layer_norm_11[grid(16)](buf23, buf24, buf25,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_12[grid(64)](buf23, buf24, buf25,
primals_13, primals_14, buf26, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_14
buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_convolution_13[grid(16, 4)](buf26, buf27, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_15, stride=(1,),
padding=(4,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf28, (4, 4, 4), (16, 4, 1))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_14[grid(64)](buf29, primals_16,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_16
buf30 = extern_kernels.convolution(buf29, primals_17, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf30, (4, 4, 4), (16, 4, 1))
buf31 = buf27
del buf27
triton_poi_fused_add_15[grid(4, 16)](buf26, buf30, primals_18,
buf31, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1)
del primals_18
buf32 = buf25
del buf25
buf33 = buf24
del buf24
triton_poi_fused_native_layer_norm_11[grid(16)](buf31, buf32, buf33,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf34 = buf30
del buf30
triton_poi_fused_native_layer_norm_12[grid(64)](buf31, buf32, buf33,
primals_19, primals_20, buf34, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf32
del buf33
del primals_20
return (buf34, buf22, primals_1, primals_6, primals_13, primals_15,
primals_17, primals_19, buf5, reinterpret_tensor(buf7, (16, 4), (4,
1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), buf18,
reinterpret_tensor(buf20, (16, 4), (4, 1), 0), buf23,
reinterpret_tensor(buf26, (4, 4, 4), (4, 1, 16), 0), buf29, buf31,
primals_11, reinterpret_tensor(buf14, (16, 1, 4), (1, 1, 16), 64),
reinterpret_tensor(buf15, (16, 1, 4), (1, 1, 16), 0),
reinterpret_tensor(buf14, (16, 4, 1), (1, 16, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (4, 1), 0), primals_4,
reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 128),
reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0),
reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 64))
class ExtractorNew(Module):
"""Convolutional Transformer Decoder Layer"""
def __init__(self, d_model: 'int', nhead: 'int', d_hid: 'int', dropout=
0.1, no_residual=False):
super(ExtractorNew, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.cross_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.conv1 = Conv1d(d_model, d_hid, 9, padding=4)
self.conv2 = Conv1d(d_hid, d_model, 1, padding=0)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.norm3 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.dropout3 = Dropout(dropout)
self.no_residual = no_residual
def forward(self, input_0, input_1):
        primals_3 = self.self_attn.in_proj_weight
        primals_2 = self.self_attn.in_proj_bias
        primals_4 = self.self_attn.out_proj.weight
        primals_5 = self.self_attn.out_proj.bias
        primals_6 = self.norm1.weight
        primals_7 = self.norm1.bias
        primals_9 = self.cross_attn.in_proj_weight
        primals_10 = self.cross_attn.in_proj_bias
        primals_11 = self.cross_attn.out_proj.weight
        primals_12 = self.cross_attn.out_proj.bias
        primals_13 = self.norm2.weight
        primals_14 = self.norm2.bias
        primals_15 = self.conv1.weight
        primals_16 = self.conv1.bias
        primals_17 = self.conv2.weight
        primals_18 = self.conv2.bias
        primals_19 = self.norm3.weight
        primals_20 = self.norm3.bias
primals_1 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20])
return output[0], output[1]
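# A hedged usage sketch for the compiled wrapper (illustrative only): call()
# launches the Triton kernels above, so a CUDA device is required, and the
# input shape/stride asserts pin the inputs to (4, 4, 4). Note the traced
# graph contains no dropout ops, consistent with an eval-style trace.
def _run_compiled_example():
    model = ExtractorNew(d_model=4, nhead=4, d_hid=4).cuda()
    tgt = torch.rand(4, 4, 4, device='cuda')
    memory = torch.rand(4, 4, 4, device='cuda')
    out, attn = model(tgt, memory)
    return out.shape, attn.shape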
| SolomidHero/FragmentVC-with-RAdam | Extractor | false | 17950 | ["MIT"] | 6 | a0ee884155a4e8f47d8950a35258e58987f6289e | https://github.com/SolomidHero/FragmentVC-with-RAdam/tree/a0ee884155a4e8f47d8950a35258e58987f6289e
State_Autoencoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/u2/cu2p7goazixizpmmdiycaosknk44c674vuuwgsv554n4wqqlv52u.py
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_1 => convolution
# input_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/sv/csvftwrdarva5rdyau57wn2meeqtaxai5c36jeovsvhyqroi5tdl.py
# Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_3 => convolution_1
# input_4 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yf/cyfy3eu7um7uyec5wacdk5ji6kench67lluc5qqm2svjcq74aufg.py
# Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# input_5 => convolution_2
# input_6 => gt, mul, where
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution_2, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 100) % 64
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
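# The kernel above fuses the per-channel bias add with LeakyReLU
# (negative_slope=0.01); the boolean tensor written to out_ptr0 records where
# the pre-activation was positive. A hedged reference sketch (argument names
# assumed):
def _bias_leaky_relu_reference(conv_out, bias, negative_slope=0.01):
    # conv_out: (N, C, H, W); bias: (C,)
    y = conv_out + bias.view(1, -1, 1, 1)
    return torch.where(y > 0, y, y * negative_slope)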
# kernel path: runs/run_shard_2/inductor_cache/mv/cmvgpslrdxea6sqbllomn5yb2xjm56qeelcyt2w6573wsnnje7r4.py
# Topologically Sorted Source Nodes: [input_7, input_8], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_7 => convolution_3
# input_8 => relu_2
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 100) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ac/cacqnxauvodccimm5izugvwbc4y3jhm4qzlxo5ezdffe7z37u2uy.py
# Topologically Sorted Source Nodes: [input_13, input_14], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# input_13 => convolution_6
# input_14 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_14, %primals_15, [2, 2], [1, 1], [1, 1], True, [1, 1], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.01), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_6, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (64, 32, 7, 7), (1568, 49, 7, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (64, 32, 7, 7), (1568, 49, 7, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_13, (16, ), (1, ))
assert_size_stride(primals_14, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_15, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 65536, grid=grid(65536), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 16, 16), (8192, 256, 16, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 32768, grid=grid(32768), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 10, 10), (6400, 100, 10, 1))
buf5 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1), torch.bool)
buf6 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf4, primals_7, buf5, buf6, 25600, grid=grid(25600), stream=stream0)
del buf4
del primals_7
# Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 10, 10), (6400, 100, 10, 1))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [input_7, input_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf8, primals_9, 25600, grid=grid(25600), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 16, 16), (8192, 256, 16, 1))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [input_9, input_10], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf10, primals_11, 32768, grid=grid(32768), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_12, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf11, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf12, primals_13, 65536, grid=grid(65536), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_14, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf13, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf14 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool)
buf15 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_13, input_14], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_4.run(buf13, primals_15, buf14, buf15, 49152, grid=grid(49152), stream=stream0)
del buf13
del primals_15
return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf3, buf5, buf6, buf8, buf10, buf12, buf14, )
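# Editor's note: `call` returns the decoder output (buf15) first, followed by
# the tensors autograd needs for a later backward pass: the convolution
# weights, the post-activation intermediates (buf1 through buf12), and the
# saved LeakyReLU masks (buf5, buf14).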
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 32, 7, 7), (1568, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((64, 32, 7, 7), (1568, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((16, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from collections import OrderedDict
class State_Autoencoder(nn.Module):
def __init__(self, frame_stacks=1, channels=3):
super(State_Autoencoder, self).__init__()
self.encoder = nn.Sequential(OrderedDict([('encoder_conv1', nn.
Conv2d(channels * frame_stacks, 16, kernel_size=3, stride=2,
padding=1)), ('encoder_relu1', nn.ReLU()), ('encoder_conv2', nn
.Conv2d(16, 32, kernel_size=3, stride=2, padding=1)), (
'encoder_relu2', nn.ReLU()), ('encoder_conv3', nn.Conv2d(32, 64,
kernel_size=7)), ('encoder_relu3', nn.LeakyReLU())]))
self.bottleneck = nn.Sequential(OrderedDict([('bottleneck_conv1',
nn.Conv2d(64, 64, kernel_size=(1, 1))), ('bottleneck_relu1', nn
.ReLU())]))
self.decoder = nn.Sequential(OrderedDict([('decoder_Tconv1', nn.
ConvTranspose2d(64, 32, kernel_size=7)), ('decoder_relu1', nn.
ReLU()), ('decoder_Tconv2', nn.ConvTranspose2d(32, 16,
kernel_size=3, stride=2, padding=1, output_padding=1)), (
'decoder_relu2', nn.ReLU()), ('decoder_Tconv3', nn.
ConvTranspose2d(16, channels * frame_stacks, kernel_size=3,
stride=2, padding=1, output_padding=1)), ('decoder_relu3', nn.
LeakyReLU())]))
def forward(self, x):
x = self.encoder(x)
x1 = self.bottleneck(x)
x1 = self.decoder(x1)
return x1
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
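# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the original repo): a
# minimal smoke test checking that the autoencoder round-trips a 64x64 batch
# back to the same spatial shape. The function name is illustrative only.
# ---------------------------------------------------------------------------
def _smoke_test_state_autoencoder():
    model = State_Autoencoder()
    x = torch.rand(4, 3, 64, 64)
    with torch.no_grad():
        y = model(x)
    # encoder: 64 -> 32 -> 16 -> 10 (7x7 valid conv); decoder mirrors it back
    assert y.shape == (4, 3, 64, 64)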
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # mask elided: the launch grid covers every element, so no bounds check is needed
x3 = xindex
x1 = xindex // 1024 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 7, 7), (1568, 49, 7, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64, 32, 7, 7), (1568, 49, 7, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_13, (16,), (1,))
assert_size_stride(primals_14, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_15, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 32, 32), (16384, 1024, 32, 1))
        buf1 = buf0  # alias: reuse buf0's storage for the in-place bias+ReLU below
        del buf0
        get_raw_stream(0)  # vestigial handle fetch; the plain Triton launches below run on the current stream
triton_poi_fused_convolution_relu_0[grid(65536)](buf1, primals_2,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 16, 16), (8192, 256, 16, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(32768)](buf3, primals_5,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 10, 10), (6400, 100, 10, 1))
buf5 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1),
torch.bool)
buf6 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1),
torch.float32)
        triton_poi_fused_convolution_leaky_relu_2[grid(25600)](buf4,
            primals_7, buf5, buf6, 25600, XBLOCK=128, num_warps=4, num_stages=1)
del buf4
del primals_7
buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 10, 10), (6400, 100, 10, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_3[grid(25600)](buf8, primals_9,
25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf9 = extern_kernels.convolution(buf8, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 16, 16), (8192, 256, 16, 1))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_1[grid(32768)](buf10, primals_11,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf11 = extern_kernels.convolution(buf10, primals_12, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf11, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf12 = buf11
del buf11
triton_poi_fused_convolution_relu_0[grid(65536)](buf12, primals_13,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf13 = extern_kernels.convolution(buf12, primals_14, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf13, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf14 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.bool)
buf15 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_4[grid(49152)](buf13,
primals_15, buf14, buf15, 49152, XBLOCK=512, num_warps=4,
num_stages=1)
del buf13
del primals_15
return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf1, buf3, buf5, buf6, buf8,
buf10, buf12, buf14)
class State_AutoencoderNew(nn.Module):
def __init__(self, frame_stacks=1, channels=3):
super(State_AutoencoderNew, self).__init__()
self.encoder = nn.Sequential(OrderedDict([('encoder_conv1', nn.
Conv2d(channels * frame_stacks, 16, kernel_size=3, stride=2,
padding=1)), ('encoder_relu1', nn.ReLU()), ('encoder_conv2', nn
.Conv2d(16, 32, kernel_size=3, stride=2, padding=1)), (
'encoder_relu2', nn.ReLU()), ('encoder_conv3', nn.Conv2d(32, 64,
kernel_size=7)), ('encoder_relu3', nn.LeakyReLU())]))
self.bottleneck = nn.Sequential(OrderedDict([('bottleneck_conv1',
nn.Conv2d(64, 64, kernel_size=(1, 1))), ('bottleneck_relu1', nn
.ReLU())]))
self.decoder = nn.Sequential(OrderedDict([('decoder_Tconv1', nn.
ConvTranspose2d(64, 32, kernel_size=7)), ('decoder_relu1', nn.
ReLU()), ('decoder_Tconv2', nn.ConvTranspose2d(32, 16,
kernel_size=3, stride=2, padding=1, output_padding=1)), (
'decoder_relu2', nn.ReLU()), ('decoder_Tconv3', nn.
ConvTranspose2d(16, channels * frame_stacks, kernel_size=3,
stride=2, padding=1, output_padding=1)), ('decoder_relu3', nn.
LeakyReLU())]))
def forward(self, input_0):
primals_1 = self.encoder.encoder_conv1.weight
primals_2 = self.encoder.encoder_conv1.bias
primals_4 = self.encoder.encoder_conv2.weight
primals_5 = self.encoder.encoder_conv2.bias
primals_6 = self.encoder.encoder_conv3.weight
primals_7 = self.encoder.encoder_conv3.bias
primals_8 = self.bottleneck.bottleneck_conv1.weight
primals_9 = self.bottleneck.bottleneck_conv1.bias
primals_10 = self.decoder.decoder_Tconv1.weight
primals_11 = self.decoder.decoder_Tconv1.bias
primals_12 = self.decoder.decoder_Tconv2.weight
primals_13 = self.decoder.decoder_Tconv2.bias
primals_14 = self.decoder.decoder_Tconv3.weight
primals_15 = self.decoder.decoder_Tconv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
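# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): State_AutoencoderNew is a drop-in
# replacement for the eager module, but the `call` graph above is CUDA-only,
# so this sketch assumes a CUDA device is available.
# ---------------------------------------------------------------------------
def _smoke_test_state_autoencoder_new():
    if not torch.cuda.is_available():
        return
    model = State_AutoencoderNew().cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    y = model(x)
    assert y.shape == (4, 3, 64, 64)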
 | Squishy123/GDE_net | State_Autoencoder | false | 17951 | [
"Apache-2.0"
] | 4 | 9094cbf58edbf0d62a2b2cd66743322597f66269 | https://github.com/Squishy123/GDE_net/tree/9094cbf58edbf0d62a2b2cd66743322597f66269 |
SmallMnistNoDropout | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ul/culfeun7wmrsya4fv22he6lp67htodmnmbg4achhwyybqeqe3ljt.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3600) % 10
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
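# Editor's note: xnumel = 144000 = 4 * 10 * 60 * 60, the element count of the
# first convolution's output; x1 recovers the channel index so the kernel can
# add the per-channel bias before applying ReLU in place.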
# kernel path: runs/run_shard_2/inductor_cache/5l/c5lfmsfuebobasgmahlh54mpbwcpvkqq3xtpdx5es3dj6cpvjw77.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 250880
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 3136) % 20
x0 = xindex % 3136
x3 = (xindex // 3136)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x0 + (3200*x3)), tmp6, xmask)
''', device_str='cuda')
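# Editor's note: besides the in-place bias+ReLU, this kernel writes the
# backward mask (relu output <= 0) into a buffer with a padded channel stride
# of 3200 rather than the natural 3136 = 56 * 56, matching the
# (64000, 3200, 56, 1) allocation in `call` below -- presumably padding for
# memory alignment.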
# kernel path: runs/run_shard_2/inductor_cache/ov/covxr2juktxjwdz3javvk6wtjtgx3iticwwsytpotrpg5czkgast.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 39200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
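# Editor's note: xnumel = 39200 = 784 * 50, i.e. the fc1 output after the
# x.view(-1, 320) reshape; the matmul itself is issued separately through
# extern_kernels.mm, and this kernel only fuses the bias add with ReLU.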
# kernel path: runs/run_shard_2/inductor_cache/i7/ci7jltkvpyv54u2b2qwcmikhqlfg7bmual2jgwjmrckawmdx6w76.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_3 = async_compile.triton('triton_per_fused__log_softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1024, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 784
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
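# Editor's note: the reduction above is the numerically stable log-softmax
#     log_softmax(x)_i = (x_i - max(x)) - log(sum_j exp(x_j - max(x)))
# tmp4 holds the per-row max, tmp10 the masked row sum of exponentials, and
# tmp12 the final log-probability; subtracting the max before exp() prevents
# overflow.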
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (10, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (20, ), (1, ))
assert_size_stride(primals_6, (50, 320), (320, 1))
assert_size_stride(primals_7, (50, ), (1, ))
assert_size_stride(primals_8, (10, 50), (50, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 144000, grid=grid(144000), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 20, 56, 56), (62720, 3136, 56, 1))
buf3 = buf2; del buf2 # reuse
buf10 = empty_strided_cuda((4, 20, 56, 56), (64000, 3200, 56, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf3, primals_5, buf10, 250880, grid=grid(250880), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((784, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (784, 320), (320, 1), 0), reinterpret_tensor(primals_6, (320, 50), (1, 320), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf5, primals_7, 39200, grid=grid(39200), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (50, 10), (1, 50), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf9 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_3.run(buf6, buf9, 784, 10, grid=grid(784), stream=stream0)
del buf6
return (buf9, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf3, (784, 320), (320, 1), 0), buf5, buf9, primals_8, primals_6, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((10, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((50, 320), (320, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
class SmallMnistNoDropout(nn.Module):
def __init__(self):
super(SmallMnistNoDropout, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.relu2 = nn.ReLU()
self.fc1 = nn.Linear(320, 50)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(50, 10)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, x):
x = self.relu1(self.conv1(x))
x = self.relu2(self.conv2(x))
x = x.view(-1, 320)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.log_softmax(x)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
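# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): with a (4, 1, 64, 64) input the two
# 5x5 valid convolutions give 60x60 and then 56x56 maps, so x.view(-1, 320)
# turns 4 * 20 * 56 * 56 = 250880 elements into 784 rows of 320 features.
# ---------------------------------------------------------------------------
def _smoke_test_small_mnist_no_dropout():
    model = SmallMnistNoDropout()
    with torch.no_grad():
        out = model(torch.rand(4, 1, 64, 64))
    assert out.shape == (784, 10)
    # each LogSoftmax row exponentiates-and-sums to 1
    assert torch.allclose(out.exp().sum(dim=1), torch.ones(784), atol=1e-5)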
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 250880
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 3136 % 20
x0 = xindex % 3136
x3 = xindex // 3136
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x0 + 3200 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 39200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 784
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (50, 320), (320, 1))
assert_size_stride(primals_7, (50,), (1,))
assert_size_stride(primals_8, (10, 50), (50, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(144000)](buf1, primals_2,
144000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 20, 56, 56), (62720, 3136, 56, 1))
buf3 = buf2
del buf2
buf10 = empty_strided_cuda((4, 20, 56, 56), (64000, 3200, 56, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(250880)](
buf3, primals_5, buf10, 250880, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_5
buf4 = empty_strided_cuda((784, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (784, 320), (320, 1), 0),
reinterpret_tensor(primals_6, (320, 50), (1, 320), 0), out=buf4)
buf5 = buf4
del buf4
        triton_poi_fused_relu_2[grid(39200)](buf5, primals_7, 39200,
            XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8,
(50, 10), (1, 50), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf9 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_3[grid(784)](buf6, buf9, 784, 10,
XBLOCK=32, num_warps=4, num_stages=1)
del buf6
    return buf9, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(
        buf3, (784, 320), (320, 1), 0), buf5, buf9, primals_8, primals_6, buf10
class SmallMnistNoDropoutNew(nn.Module):
def __init__(self):
super(SmallMnistNoDropoutNew, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.relu2 = nn.ReLU()
self.fc1 = nn.Linear(320, 50)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(50, 10)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
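# ---------------------------------------------------------------------------
# Hedged sketch (editor addition, CUDA only): assuming both the eager class
# SmallMnistNoDropout and this compiled wrapper are importable in one scope,
# copying the state dict across lets you check that the fused kernels agree
# with the eager reference up to floating-point reassociation.
# ---------------------------------------------------------------------------
def _compare_eager_vs_compiled():
    if not torch.cuda.is_available():
        return
    eager = SmallMnistNoDropout().cuda()
    compiled = SmallMnistNoDropoutNew().cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 1, 64, 64, device='cuda')
    with torch.no_grad():
        assert torch.allclose(eager(x), compiled(x), atol=1e-5)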
 | Rohan-Chaudhury/aimet | SmallMnistNoDropout | false | 17952 | [
"BSD-3-Clause"
] | 3 | 1c38cac8cc0fd32dca40ce5e39940805d29f7a4a | https://github.com/Rohan-Chaudhury/aimet/tree/1c38cac8cc0fd32dca40ce5e39940805d29f7a4a |
SmallMnist | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ul/culfeun7wmrsya4fv22he6lp67htodmnmbg4achhwyybqeqe3ljt.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3600) % 10
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/5l/c5lfmsfuebobasgmahlh54mpbwcpvkqq3xtpdx5es3dj6cpvjw77.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => convolution_1
# x_2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 250880
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 3136) % 20
x0 = xindex % 3136
x3 = (xindex // 3136)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x0 + (3200*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ov/covxr2juktxjwdz3javvk6wtjtgx3iticwwsytpotrpg5czkgast.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 39200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/i7/ci7jltkvpyv54u2b2qwcmikhqlfg7bmual2jgwjmrckawmdx6w76.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_3 = async_compile.triton('triton_per_fused__log_softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1024, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 784
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (10, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (20, ), (1, ))
assert_size_stride(primals_6, (50, 320), (320, 1))
assert_size_stride(primals_7, (50, ), (1, ))
assert_size_stride(primals_8, (10, 50), (50, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 144000, grid=grid(144000), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 20, 56, 56), (62720, 3136, 56, 1))
buf3 = buf2; del buf2 # reuse
buf10 = empty_strided_cuda((4, 20, 56, 56), (64000, 3200, 56, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf3, primals_5, buf10, 250880, grid=grid(250880), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((784, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (784, 320), (320, 1), 0), reinterpret_tensor(primals_6, (320, 50), (1, 320), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf5, primals_7, 39200, grid=grid(39200), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (50, 10), (1, 50), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf9 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_3.run(buf6, buf9, 784, 10, grid=grid(784), stream=stream0)
del buf6
return (buf9, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf3, (784, 320), (320, 1), 0), buf5, buf9, primals_8, primals_6, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((10, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((50, 320), (320, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
class SmallMnist(nn.Module):
def __init__(self):
super(SmallMnist, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.relu2 = nn.ReLU()
self.fc1 = nn.Linear(320, 50)
self.relu3 = nn.ReLU()
self.dropout = nn.Dropout()
self.fc2 = nn.Linear(50, 10)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, x):
x = self.relu1(self.conv1(x))
x = self.conv2(x)
x = self.relu2(self.conv2_drop(x))
x = x.view(-1, 320)
x = self.relu3(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return self.log_softmax(x)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 250880
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 3136 % 20
x0 = xindex % 3136
x3 = xindex // 3136
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x0 + 3200 * x3), tmp6, xmask)
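# Sketch (assumption, for exposition only): the boolean written to out_ptr0 is
# the ReLU saturation mask that backward uses to zero gradients; roughly:
def _reference_relu_mask(conv_out, bias):
    # bias is broadcast over the spatial dims, as in the fused kernel above
    act = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    return act, act <= 0.0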
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 39200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 784
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (50, 320), (320, 1))
assert_size_stride(primals_7, (50,), (1,))
assert_size_stride(primals_8, (10, 50), (50, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0
del buf0
        get_raw_stream(0)  # kept from the generated code; the stream handle is unused in this cleaned version
triton_poi_fused_convolution_relu_0[grid(144000)](buf1, primals_2,
144000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 20, 56, 56), (62720, 3136, 56, 1))
buf3 = buf2
del buf2
buf10 = empty_strided_cuda((4, 20, 56, 56), (64000, 3200, 56, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(250880)](
buf3, primals_5, buf10, 250880, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_5
buf4 = empty_strided_cuda((784, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (784, 320), (320, 1), 0),
reinterpret_tensor(primals_6, (320, 50), (1, 320), 0), out=buf4)
buf5 = buf4
del buf4
        triton_poi_fused_relu_2[grid(39200)](buf5, primals_7, 39200,
            XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8,
(50, 10), (1, 50), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf9 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_3[grid(784)](buf6, buf9, 784, 10,
XBLOCK=32, num_warps=4, num_stages=1)
del buf6
    return (buf9, primals_1, primals_3, primals_4, buf1,
        reinterpret_tensor(buf3, (784, 320), (320, 1), 0), buf5, buf9,
        primals_8, primals_6, buf10)
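# Note: reinterpret_tensor(buf3, (784, 320), (320, 1), 0) plays the role of
# x.view(-1, 320) in the eager model: 4*20*56*56 == 784*320 == 250880, and
# buf3 is contiguous, so the (4, 20, 56, 56) activation is re-viewed as a
# (784, 320) matrix without a copy. A rough eager-mode equivalent
# (illustrative only):
def _reference_flatten(x):
    return x.reshape(-1, 320)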
class SmallMnistNew(nn.Module):
def __init__(self):
super(SmallMnistNew, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.relu2 = nn.ReLU()
self.fc1 = nn.Linear(320, 50)
self.relu3 = nn.ReLU()
self.dropout = nn.Dropout()
self.fc2 = nn.Linear(50, 10)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
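# Hypothetical usage of the compiled wrapper (names as defined above; requires
# a CUDA device, so shown as a comment only):
#     model = SmallMnistNew().cuda()
#     out = model(torch.rand(4, 1, 64, 64, device='cuda'))  # -> (784, 10) log-probs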
 | Rohan-Chaudhury/aimet | SmallMnist | false | 17,953 | ["BSD-3-Clause"] | 3 | 1c38cac8cc0fd32dca40ce5e39940805d29f7a4a | https://github.com/Rohan-Chaudhury/aimet/tree/1c38cac8cc0fd32dca40ce5e39940805d29f7a4a |
SingleBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ar/carwklq7cbuokoe7tc7kt36gj77xcutunqda57p35zikdkxy6aec.py
# Topologically Sorted Source Nodes: [x_1, cat_tgt_1], Original ATen: [aten.relu, aten.cat]
# Source node to ATen node mapping:
# cat_tgt_1 => cat_7
# x_1 => relu
# Graph fragment:
# %relu : [num_users=6] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %cat_7 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %cat_6], 2), kwargs = {})
triton_poi_fused_cat_relu_0 = async_compile.triton('triton_poi_fused_cat_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_relu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x0 + (8*x1)), tmp4, xmask)
''', device_str='cuda')
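# Sketch (assumption): this kernel fuses bias+ReLU with writing the result into
# the first half of a pre-allocated concat buffer (row stride 8), roughly:
#     x = torch.relu(x + bias)   # in-place update of the activation
#     cat_buf[..., :4] = x       # left half of torch.cat([x, other], dim=2)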
# kernel path: runs/run_shard_2/inductor_cache/2f/c2fdjj4ffoa24qdrhmm2rlzpkphtxofjp54y47wnmyr3rx3f7mht.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_7 => relu_3
# Graph fragment:
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_7,), kwargs = {})
# %le_38 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/hw/chw4govxqvfjbmqbvoy4ipdvz2mtbabqvuyq36fl2gdfe3hx6atb.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_5 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le_39 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/wj/cwjd35o2nbar5uat5nbemgsct5x6hpw74gha2u7llss4zgezvg5t.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul => bmm
# Graph fragment:
# %bmm : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand, %expand_1), kwargs = {})
triton_poi_fused_bmm_3 = async_compile.triton('triton_poi_fused_bmm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
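# Sketch (assumption): this gather kernel just materializes one column of a
# strided tensor contiguously so bmm can consume it -- here element 0 of every
# length-4 row (and, in triton_poi_fused_bmm_4 below, of every length-8 row of
# the concat buffer). The later bmm_6/7, bmm_9/10, and bmm_12/13 kernels
# repeat the same copy for columns 1, 2, and 3.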
# kernel path: runs/run_shard_2/inductor_cache/hw/chwdconlmertbx42z7cibytldsadkwdj37yq2rqql6c3vmyll7ho.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul => bmm
# Graph fragment:
# %bmm : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand, %expand_1), kwargs = {})
triton_poi_fused_bmm_4 = async_compile.triton('triton_poi_fused_bmm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (8*x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6j/c6j7voowlizl3lwh5smid6azjbdk6lo3lduawqtcpnjh7ffci33h.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp
# Graph fragment:
# %mul_tensor_63 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default_63 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_63, [2], True), kwargs = {})
# %sub_tensor_63 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_63, %amax_default_63), kwargs = {})
# %div_tensor_63 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_63, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_63,), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
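# A minimal reference (illustrative only) for the numerically stable softmax
# numerator this kernel produces, with the temperature fixed at 1.0; the
# normalization by the row sum happens downstream in the cat kernels.
def _reference_softmax_exp(scores):
    # subtract the per-row max over the last attention dim before exp
    return (scores - scores.max(dim=2, keepdim=True).values).exp()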
# kernel path: runs/run_shard_2/inductor_cache/f6/cf6i5h75finv5fv32sposxwuxj3aceu5fo4gdr4qogbkdqo5ar6a.py
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_1 => bmm_1
# Graph fragment:
# %bmm_1 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_2, %expand_3), kwargs = {})
triton_poi_fused_bmm_6 = async_compile.triton('triton_poi_fused_bmm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/4d/c4dlgoxftgi3cwvist4a23fzdvkv53ruxvqbg3ece556n6auqgju.py
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_1 => bmm_1
# Graph fragment:
# %bmm_1 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_2, %expand_3), kwargs = {})
triton_poi_fused_bmm_7 = async_compile.triton('triton_poi_fused_bmm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (8*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/rq/crq36oske4qfeul6snnmv4iknrolqgt6n7ncuc2zjy26qa72g2vu.py
# Topologically Sorted Source Nodes: [tgt_update_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# tgt_update_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sum_2, %sum_4], 2), kwargs = {})
triton_poi_fused_cat_8 = async_compile.triton('triton_poi_fused_cat_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = (xindex // 2)
x2 = (xindex // 8)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4*x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp5 / tmp11
tmp13 = tl.load(in_ptr1 + (4 + (32*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp6 / tmp11
tmp16 = tl.load(in_ptr1 + (12 + (32*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp8 / tmp11
tmp20 = tl.load(in_ptr1 + (20 + (32*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp10 / tmp11
tmp24 = tl.load(in_ptr1 + (28 + (32*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tmp30 = tl.full([1], 2, tl.int64)
tmp31 = tmp0 < tmp30
tmp32 = tl.load(in_ptr2 + (4*x3), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (1 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.load(in_ptr2 + (2 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr2 + (3 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp32 / tmp38
tmp40 = tl.load(in_ptr1 + (5 + (32*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp33 / tmp38
tmp43 = tl.load(in_ptr1 + (13 + (32*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp35 / tmp38
tmp47 = tl.load(in_ptr1 + (21 + (32*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp37 / tmp38
tmp51 = tl.load(in_ptr1 + (29 + (32*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp29, tmp53, tmp54)
tmp56 = tl.where(tmp4, tmp28, tmp55)
tl.store(out_ptr0 + (x5), tmp56, xmask)
''', device_str='cuda')
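# Sketch (assumption): this kernel finishes the attention step inline --
# normalize the exp-scores and take the weighted sum over the four value
# entries -- then writes each branch's result into its slot of the
# concatenated output:
#     p = exp_scores / exp_scores.sum(dim=-1, keepdim=True)  # softmax weights
#     branch_out = (p * values).sum(dim=-1)                   # weighted sum
#     out[..., b] = branch_out                                # cat over branches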
# kernel path: runs/run_shard_2/inductor_cache/ws/cwsxcklgfoujw6vbhp3j3dxoepgcyucgputomom24gx4p3gmfi3u.py
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_2 => bmm_2
# Graph fragment:
# %bmm_2 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_4, %expand_5), kwargs = {})
triton_poi_fused_bmm_9 = async_compile.triton('triton_poi_fused_bmm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ei/ceizsrzjnlwqpwtjmprxplhvlqlyyhcq6xkhwanrotlowamkosbj.py
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_2 => bmm_2
# Graph fragment:
# %bmm_2 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_4, %expand_5), kwargs = {})
triton_poi_fused_bmm_10 = async_compile.triton('triton_poi_fused_bmm_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (8*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yq/cyqqfu4xx62gevdoalf4f44ndqa3k2vh6va5wcujqd35xznkcgtz.py
# Topologically Sorted Source Nodes: [tgt_update_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# tgt_update_2 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %sum_6], 2), kwargs = {})
triton_poi_fused_cat_11 = async_compile.triton('triton_poi_fused_cat_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = (xindex // 3)
x2 = (xindex // 12)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((2*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (4*x3), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (6 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (14 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (22 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (30 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + (x5), tmp33, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/zh/czhmvy4wstee4etc6wleiaxgv3mnn3op7ger72nlupsv6nn2mrkw.py
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_3 => bmm_3
# Graph fragment:
# %bmm_3 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_6, %expand_7), kwargs = {})
triton_poi_fused_bmm_12 = async_compile.triton('triton_poi_fused_bmm_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_12(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/zn/cznxx2olfrny3thdekcpvhevhouppzux2fco5ounkgashsu2sdkw.py
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# matmul_3 => bmm_3
# Graph fragment:
# %bmm_3 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_6, %expand_7), kwargs = {})
triton_poi_fused_bmm_13 = async_compile.triton('triton_poi_fused_bmm_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_13(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + (8*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/nb/cnbcemdksrxgo2luubipkp6kp7jhglsgnvvl234lp7ukqvcsr6xp.py
# Topologically Sorted Source Nodes: [tgt_update_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# tgt_update_3 => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_1, %sum_8], 2), kwargs = {})
triton_poi_fused_cat_14 = async_compile.triton('triton_poi_fused_cat_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 4)
x2 = (xindex // 16)
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((3*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (4*x3), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (7 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (15 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (23 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (31 + (32*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + (x0 + (8*x3)), tmp33, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/di/cdinpuobtvxumk76uztfkgslzj7aec5e7vtt5ukicgjqp2rhjkif.py
# Topologically Sorted Source Nodes: [x_9, add_1], Original ATen: [aten.relu, aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# x_9 => relu_4
# Graph fragment:
# %relu_4 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%view_21,), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_4, %relu_1), kwargs = {})
triton_poi_fused_add_relu_15 = async_compile.triton('triton_poi_fused_add_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_15(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
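# Roughly (illustrative): out = relu(x + bias); skip_sum = out + residual.
# The fused kernel stores the activation back in place and the residual sum to
# a second buffer in a single pass.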
# kernel path: runs/run_shard_2/inductor_cache/6x/c6xlve2lzm7dhelmklk2wqxiksakyrz6dwl35lsoriw4l5yq4ib2.py
# Topologically Sorted Source Nodes: [x_15, add, sum_9, v_mean], Original ATen: [aten.relu, aten.add, aten.sum, aten.div]
# Source node to ATen node mapping:
# add => add
# sum_9 => sum_17
# v_mean => div_16
# x_15 => relu_7
# Graph fragment:
# %relu_7 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%view_39,), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_7, %relu), kwargs = {})
# %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add, [1]), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_17, 4), kwargs = {})
triton_poi_fused_add_div_relu_sum_16 = async_compile.triton('triton_poi_fused_add_div_relu_sum_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_relu_sum_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_relu_sum_16(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp10 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
tmp13 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp16 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
tmp19 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp22 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tmp8 = tmp7 + tmp1
tmp9 = triton_helpers.maximum(tmp3, tmp8)
tmp11 = tmp9 + tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp17 = tmp15 + tmp16
tmp18 = tmp12 + tmp17
tmp20 = tmp19 + tmp1
tmp21 = triton_helpers.maximum(tmp3, tmp20)
tmp23 = tmp21 + tmp22
tmp24 = tmp18 + tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tl.store(in_out_ptr0 + (x2), tmp26, xmask)
''', device_str='cuda')
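# A minimal eager-mode sketch of what the fused kernel above appears to
# compute, reusing the module-level torch import; the function and argument
# names are illustrative, not part of the generated module. relu(x + bias) is
# added to a residual, summed over the middle dimension of a (B, 4, C) tensor,
# and divided by 4 (the fused `* 0.25`).
def _sketch_add_div_relu_sum_16(x, bias, residual):
    # x, residual: (B, 4, C) float tensors; bias: (C,), broadcast over (B, 4)
    out = torch.relu(x + bias) + residual
    return out.sum(dim=1) / 4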
# kernel path: runs/run_shard_2/inductor_cache/s2/cs2p46bqmrklv4hmybgire6cufsgbavlwmoljy2c3pdk5u4tpddp.py
# Topologically Sorted Source Nodes: [add_1, sum_10, q_mean], Original ATen: [aten.add, aten.sum, aten.div]
# Source node to ATen node mapping:
# add_1 => add_1
# q_mean => div_17
# sum_10 => sum_18
# Graph fragment:
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_4, %relu_1), kwargs = {})
# %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_1, [1]), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_18, 4), kwargs = {})
triton_poi_fused_add_div_sum_17 = async_compile.triton('triton_poi_fused_add_div_sum_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_sum_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_sum_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
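# Hedged sketch of the kernel above (illustrative names, assuming both inputs
# were already activated upstream): an elementwise add, a sum over dim 1 of a
# (B, 4, C) tensor, and division by 4.
def _sketch_add_div_sum_17(a, b):
    return (a + b).sum(dim=1) / 4  # the kernel multiplies by 0.25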
# kernel path: runs/run_shard_2/inductor_cache/5n/c5n2n25s737ww7x55eqmhpwmepynzbzrx4ask6k24lqlqqxoa2m4.py
# Topologically Sorted Source Nodes: [x_15, add], Original ATen: [aten.relu, aten.add]
# Source node to ATen node mapping:
# add => add
# x_15 => relu_7
# Graph fragment:
# %relu_7 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%view_39,), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_7, %relu), kwargs = {})
triton_poi_fused_add_relu_18 = async_compile.triton('triton_poi_fused_add_relu_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_18(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
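# Hedged sketch of the kernel above (illustrative names): bias-add plus ReLU,
# then an elementwise residual add; the bias broadcasts over the last
# dimension, matching `x0 = xindex % 4` in the kernel body.
def _sketch_add_relu_18(x, bias, residual):
    return torch.relu(x + bias) + residual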
# kernel path: runs/run_shard_2/inductor_cache/qz/cqzdhw7awh2icpma6meffruwuyw2caucguxthhwi452htc4t4iap.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_19 => relu_8
# Graph fragment:
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_41,), kwargs = {})
# %le_33 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_8, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_19 = async_compile.triton('triton_poi_fused_relu_threshold_backward_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_19(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
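# Hedged sketch of the kernel above (illustrative names): the forward ReLU is
# fused with the boolean mask that the ReLU backward (threshold_backward)
# consumes later, saving a separate pass over the activation.
def _sketch_relu_threshold_backward_19(x, bias):
    y = torch.relu(x + bias)
    return y, y <= 0  # stored in place and to out_ptr0 by the kernel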
# kernel path: runs/run_shard_2/inductor_cache/2w/c2w3cax6cljhwwxdbu5yrfzw3i6px7csj3kd22fg4u6hb52mbe3r.py
# Topologically Sorted Source Nodes: [add_2, gated_v_query, gated_v_key, gated_v_val], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
# add_2 => add_2
# gated_v_key => mul_9
# gated_v_query => mul_8
# gated_v_val => mul_10
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_17, 1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %getitem_29), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %getitem_28), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %getitem_30), kwargs = {})
triton_poi_fused_add_mul_20 = async_compile.triton('triton_poi_fused_add_mul_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_20(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = (xindex // 4)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4 + x0 + (12*x3)), xmask)
tmp6 = tl.load(in_ptr1 + (x0 + (12*x3)), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + (12*x3)), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp3 * tmp6
tmp9 = tmp3 * tmp8
tl.store(out_ptr0 + (x4), tmp5, xmask)
tl.store(out_ptr1 + (x4), tmp7, xmask)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
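# Hedged sketch of the kernel above (illustrative names; the chunk-to-role
# assignment is inferred from the source-node mapping rather than confirmed):
# a sigmoid gate in [1, 2] is broadcast over the sequence dimension and scales
# the three chunks of a packed (B, T, 3*C) projection, with the second chunk
# feeding the query, the first the key, and the third the value.
def _sketch_add_mul_20(gate_logits, packed):
    # gate_logits: (B, C); packed: (B, T, 3*C)
    gate = torch.sigmoid(gate_logits).unsqueeze(1) + 1  # `add_2` in the graph
    k, q, v = packed.chunk(3, dim=-1)
    return gate * q, gate * k, gate * v  # out_ptr0, out_ptr1, out_ptr2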
# kernel path: runs/run_shard_2/inductor_cache/lh/clh742lzdo7pnvviu5a5uhca63f6iq2igdlhx7u6uw7la3pibbei.py
# Topologically Sorted Source Nodes: [v_update_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# v_update_1 => cat_8
# Graph fragment:
# %cat_8 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sum_21, %sum_25], 2), kwargs = {})
triton_poi_fused_cat_21 = async_compile.triton('triton_poi_fused_cat_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_21(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = (xindex // 2)
x2 = (xindex // 8)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4*x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp5 / tmp11
tmp13 = tl.load(in_ptr1 + (16*x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp6 / tmp11
tmp16 = tl.load(in_ptr1 + (4 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp8 / tmp11
tmp20 = tl.load(in_ptr1 + (8 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp10 / tmp11
tmp24 = tl.load(in_ptr1 + (12 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tmp30 = tl.full([1], 2, tl.int64)
tmp31 = tmp0 < tmp30
tmp32 = tl.load(in_ptr2 + (4*x3), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (1 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.load(in_ptr2 + (2 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr2 + (3 + (4*x3)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp32 / tmp38
tmp40 = tl.load(in_ptr1 + (1 + (16*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp33 / tmp38
tmp43 = tl.load(in_ptr1 + (5 + (16*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp35 / tmp38
tmp47 = tl.load(in_ptr1 + (9 + (16*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp37 / tmp38
tmp51 = tl.load(in_ptr1 + (13 + (16*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp29, tmp53, tmp54)
tmp56 = tl.where(tmp4, tmp28, tmp55)
tl.store(out_ptr0 + (x5), tmp56, xmask)
''', device_str='cuda')
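# Hedged sketch of the kernel above (illustrative names): each output channel
# carries its own non-negative score matrix, normalized by its row sum (any
# exponentiation happened upstream) and used as attention weights over one
# column of the value tensor; the two per-channel results are concatenated.
def _sketch_cat_21(w0, w1, v):
    # w0, w1: (B, T, T) non-negative scores; v: (B, T, C) with C >= 2
    a0 = w0 / w0.sum(dim=-1, keepdim=True)
    a1 = w1 / w1.sum(dim=-1, keepdim=True)
    out0 = torch.einsum('btk,bk->bt', a0, v[..., 0])
    out1 = torch.einsum('btk,bk->bt', a1, v[..., 1])
    return torch.stack([out0, out1], dim=-1)  # (B, T, 2)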
# kernel path: runs/run_shard_2/inductor_cache/rp/crpfzciuvyvby3gf4srbaago2f2eyb4kz4ffxrdrxtyazce4pof2.py
# Topologically Sorted Source Nodes: [v_update_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# v_update_2 => cat_10
# Graph fragment:
# %cat_10 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_8, %sum_29], 2), kwargs = {})
triton_poi_fused_cat_22 = async_compile.triton('triton_poi_fused_cat_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_22(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = (xindex // 3)
x2 = (xindex // 12)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((2*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (4*x3), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + (4*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (2 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (6 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (10 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (14 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + (x5), tmp33, xmask)
''', device_str='cuda')
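# Hedged sketch of the kernel above (illustrative names): the (B, T, 2) result
# of the previous cat is carried through unchanged and extended with a third
# channel, computed with the same row-sum-normalized attention over column 2
# of the value tensor.
def _sketch_cat_22(prev, w2, v):
    # prev: (B, T, 2); w2: (B, T, T) non-negative scores; v: (B, T, C), C >= 3
    a2 = w2 / w2.sum(dim=-1, keepdim=True)
    out2 = torch.einsum('btk,bk->bt', a2, v[..., 2])
    return torch.cat([prev, out2.unsqueeze(-1)], dim=-1)  # (B, T, 3)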
# kernel path: runs/run_shard_2/inductor_cache/ng/cng57ecqkyvtd3tqxxj2dyeju7r6t5w253jqm4txr44wm5u7rvdq.py
# Topologically Sorted Source Nodes: [v_update_3, add_8], Original ATen: [aten.cat, aten.add]
# Source node to ATen node mapping:
# add_8 => add_8
# v_update_3 => cat_12
# Graph fragment:
# %cat_12 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_10, %sum_33], 2), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %cat_12), kwargs = {})
triton_poi_fused_add_cat_23 = async_compile.triton('triton_poi_fused_add_cat_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_23', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cat_23(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = (xindex // 4)
x2 = (xindex // 16)
x3 = xindex
tmp34 = tl.load(in_ptr3 + (x3), xmask)
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((3*x4) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (4*x4), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + (4*x4)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + (4*x4)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + (4*x4)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (3 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (7 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (11 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (15 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tmp35 = tmp34 + tmp33
tl.store(in_out_ptr0 + (x3), tmp35, xmask)
''', device_str='cuda')
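# Hedged sketch of the kernel above (illustrative names): the fourth channel
# is produced the same way, the full (B, T, 4) update is assembled, and the
# residual is added in place (`add_8 = add + cat_12` in the graph fragment).
def _sketch_add_cat_23(prev, w3, v, residual):
    # prev: (B, T, 3); w3: (B, T, T); v: (B, T, 4); residual: (B, T, 4)
    a3 = w3 / w3.sum(dim=-1, keepdim=True)
    out3 = torch.einsum('btk,bk->bt', a3, v[..., 3])
    update = torch.cat([prev, out3.unsqueeze(-1)], dim=-1)  # (B, T, 4)
    return residual + update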
# kernel path: runs/run_shard_2/inductor_cache/sw/cswyj2ecuyn6doowyttnfswqxklqxzlv75xfgpeev2nv7krt5qmz.py
# Topologically Sorted Source Nodes: [x_15, x_23, add_10, add_11, cat_tgt_3], Original ATen: [aten.relu, aten.add, aten.cat]
# Source node to ATen node mapping:
# add_10 => add_10
# add_11 => add_11
# cat_tgt_3 => cat_21
# x_15 => relu_7
# x_23 => relu_10
# Graph fragment:
# %relu_7 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%view_39,), kwargs = {})
# %relu_10 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_69,), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_10, %relu_7), kwargs = {})
# %add_11 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %relu), kwargs = {})
# %cat_21 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_11, %cat_20], 2), kwargs = {})
triton_poi_fused_add_cat_relu_24 = async_compile.triton('triton_poi_fused_add_cat_relu_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_relu_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cat_relu_24(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp7 = tmp5 + tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp9 = tmp4 + tmp8
tmp11 = tmp9 + tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
tl.store(out_ptr1 + (x0 + (8*x1)), tmp11, xmask)
''', device_str='cuda')
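# Hedged sketch of the kernel above (illustrative names): two bias-add-ReLU
# branches plus a plain residual are summed; the kernel writes the result both
# as a standalone tensor (out_ptr0) and into the first four channels of a
# stride-8 cat buffer (out_ptr1 at `x0 + 8*x1`).
def _sketch_add_cat_relu_24(a, bias_a, b, bias_b, residual):
    return torch.relu(a + bias_a) + torch.relu(b + bias_b) + residual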
# kernel path: runs/run_shard_2/inductor_cache/iv/civmaklkikju5ue5kkey7hke53rxgo3nigkrgb6tavm3h5vs4kx4.py
# Topologically Sorted Source Nodes: [x_25, add_12, add_13, cat_tgt_2], Original ATen: [aten.relu, aten.add, aten.cat]
# Source node to ATen node mapping:
# add_12 => add_12
# add_13 => add_13
# cat_tgt_2 => cat_17
# x_25 => relu_11
# Graph fragment:
# %relu_11 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_71,), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_11, %relu_4), kwargs = {})
# %add_13 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_12, %relu_1), kwargs = {})
# %cat_17 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_13, %cat_16], 2), kwargs = {})
triton_poi_fused_add_cat_relu_25 = async_compile.triton('triton_poi_fused_add_cat_relu_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_relu_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cat_relu_25(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x2), xmask)
tmp7 = tl.load(in_ptr3 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x0 + (8*x1)), tmp8, xmask)
''', device_str='cuda')
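# Hedged sketch of the kernel above (illustrative names): one bias-add-ReLU
# branch plus two residuals that were already activated upstream, again
# dual-stored contiguously and into one half of a cat buffer.
def _sketch_add_cat_relu_25(a, bias_a, b, c):
    return torch.relu(a + bias_a) + b + c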
# kernel path: runs/run_shard_2/inductor_cache/or/corxkrrvp7unncosf6likaokkbn2rkrr6owctqru5fwi5invbath.py
# Topologically Sorted Source Nodes: [x_15, x_23, x_37, x_45, x_59, x_67, x_81, x_89, add_56, add_57, add_58, add_59, add_60, add_61, add_62, add_63, add_64], Original ATen: [aten.relu, aten.add, aten.threshold_backward]
# Source node to ATen node mapping:
# add_56 => add_56
# add_57 => add_57
# add_58 => add_58
# add_59 => add_59
# add_60 => add_60
# add_61 => add_61
# add_62 => add_62
# add_63 => add_63
# add_64 => add_64
# x_15 => relu_7
# x_23 => relu_10
# x_37 => relu_17
# x_45 => relu_20
# x_59 => relu_27
# x_67 => relu_30
# x_81 => relu_37
# x_89 => relu_40
# Graph fragment:
# %relu_7 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%view_39,), kwargs = {})
# %relu_10 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_69,), kwargs = {})
# %relu_17 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%view_107,), kwargs = {})
# %relu_20 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_137,), kwargs = {})
# %relu_27 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%view_175,), kwargs = {})
# %relu_30 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_205,), kwargs = {})
# %relu_37 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_243,), kwargs = {})
# %relu_40 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_273,), kwargs = {})
# %add_56 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, 0), kwargs = {})
# %add_57 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_56, %relu_7), kwargs = {})
# %add_58 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_57, %relu_10), kwargs = {})
# %add_59 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_58, %relu_17), kwargs = {})
# %add_60 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_59, %relu_20), kwargs = {})
# %add_61 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_60, %relu_27), kwargs = {})
# %add_62 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_61, %relu_30), kwargs = {})
# %add_63 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_62, %relu_37), kwargs = {})
# %add_64 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_63, %relu_40), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_40, 0), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_37, 0), kwargs = {})
# %le_11 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_30, 0), kwargs = {})
# %le_14 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_27, 0), kwargs = {})
# %le_21 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_20, 0), kwargs = {})
# %le_24 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_17, 0), kwargs = {})
# %le_31 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_10, 0), kwargs = {})
# %le_34 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {})
# %le_41 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_26 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*i1', 13: '*i1', 14: '*i1', 15: '*i1', 16: '*i1', 17: '*i1', 18: '*i1', 19: '*i1', 20: '*i1', 21: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_26', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_26(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + (x2), xmask)
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + (x2), xmask)
tmp18 = tl.load(in_ptr6 + (x2), xmask)
tmp22 = tl.load(in_ptr7 + (x2), xmask)
tmp26 = tl.load(in_ptr8 + (x2), xmask)
tmp30 = tl.load(in_ptr9 + (x2), xmask)
tmp34 = tl.load(in_ptr10 + (x2), xmask)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp2 + tmp7
tmp11 = tmp9 + tmp10
tmp12 = triton_helpers.maximum(tmp6, tmp11)
tmp13 = tmp8 + tmp12
tmp15 = tmp14 + tmp4
tmp16 = triton_helpers.maximum(tmp6, tmp15)
tmp17 = tmp13 + tmp16
tmp19 = tmp18 + tmp10
tmp20 = triton_helpers.maximum(tmp6, tmp19)
tmp21 = tmp17 + tmp20
tmp23 = tmp22 + tmp4
tmp24 = triton_helpers.maximum(tmp6, tmp23)
tmp25 = tmp21 + tmp24
tmp27 = tmp26 + tmp10
tmp28 = triton_helpers.maximum(tmp6, tmp27)
tmp29 = tmp25 + tmp28
tmp31 = tmp30 + tmp4
tmp32 = triton_helpers.maximum(tmp6, tmp31)
tmp33 = tmp29 + tmp32
tmp35 = tmp34 + tmp10
tmp36 = triton_helpers.maximum(tmp6, tmp35)
tmp37 = tmp33 + tmp36
tmp38 = tmp36 <= tmp1
tmp39 = tmp32 <= tmp1
tmp40 = tmp28 <= tmp1
tmp41 = tmp24 <= tmp1
tmp42 = tmp20 <= tmp1
tmp43 = tmp16 <= tmp1
tmp44 = tmp12 <= tmp1
tmp45 = tmp7 <= tmp1
tmp46 = tmp0 <= tmp1
tl.store(in_out_ptr0 + (x2), tmp37, xmask)
tl.store(out_ptr0 + (x2), tmp38, xmask)
tl.store(out_ptr1 + (x2), tmp39, xmask)
tl.store(out_ptr2 + (x2), tmp40, xmask)
tl.store(out_ptr3 + (x2), tmp41, xmask)
tl.store(out_ptr4 + (x2), tmp42, xmask)
tl.store(out_ptr5 + (x2), tmp43, xmask)
tl.store(out_ptr6 + (x2), tmp44, xmask)
tl.store(out_ptr7 + (x2), tmp45, xmask)
tl.store(out_ptr8 + (x2), tmp46, xmask)
''', device_str='cuda')
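# Hedged sketch of the kernel above (illustrative names; the mask ordering is
# simplified relative to the kernel's out_ptr0..out_ptr8): one residual stream
# is the sum of nine ReLU outputs -- one precomputed plus eight recomputed from
# their pre-activations with two alternating shared biases -- and the kernel
# also emits the (value <= 0) mask each ReLU backward will need.
def _sketch_add_relu_threshold_backward_26(relu0, pre_acts, bias_a, bias_b):
    # relu0: already-activated tensor; pre_acts: eight pre-activation tensors
    total = relu0.clone()
    masks = [relu0 <= 0]
    for i, pre in enumerate(pre_acts):
        y = torch.relu(pre + (bias_a if i % 2 == 0 else bias_b))
        total = total + y
        masks.append(y <= 0)
    return total, masks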
# kernel path: runs/run_shard_2/inductor_cache/dw/cdwfhssl4b3drenhqt6kh6f6inz6gobm33krcm65oxbtudle3alm.py
# Topologically Sorted Source Nodes: [x_25, x_47, x_69, x_91, add_65, add_66, add_67, add_68, add_69, add_70, add_71, add_72, add_73], Original ATen: [aten.relu, aten.add, aten.threshold_backward]
# Source node to ATen node mapping:
# add_65 => add_65
# add_66 => add_66
# add_67 => add_67
# add_68 => add_68
# add_69 => add_69
# add_70 => add_70
# add_71 => add_71
# add_72 => add_72
# add_73 => add_73
# x_25 => relu_11
# x_47 => relu_21
# x_69 => relu_31
# x_91 => relu_41
# Graph fragment:
# %relu_11 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_71,), kwargs = {})
# %relu_21 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_139,), kwargs = {})
# %relu_31 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_207,), kwargs = {})
# %relu_41 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_275,), kwargs = {})
# %add_65 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_1, 0), kwargs = {})
# %add_66 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_65, %relu_4), kwargs = {})
# %add_67 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_66, %relu_11), kwargs = {})
# %add_68 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_67, %relu_14), kwargs = {})
# %add_69 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_68, %relu_21), kwargs = {})
# %add_70 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_69, %relu_24), kwargs = {})
# %add_71 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_70, %relu_31), kwargs = {})
# %add_72 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_71, %relu_34), kwargs = {})
# %add_73 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_72, %relu_41), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_41, 0), kwargs = {})
# %le_7 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_34, 0), kwargs = {})
# %le_10 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_31, 0), kwargs = {})
# %le_17 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_24, 0), kwargs = {})
# %le_20 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_21, 0), kwargs = {})
# %le_27 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_14, 0), kwargs = {})
# %le_30 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_11, 0), kwargs = {})
# %le_37 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
# %le_40 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_27 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*i1', 12: '*i1', 13: '*i1', 14: '*i1', 15: '*i1', 16: '*i1', 17: '*i1', 18: '*i1', 19: '*i1', 20: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_27', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_27(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp5 = tl.load(in_ptr2 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), xmask)
tmp13 = tl.load(in_ptr5 + (x2), xmask)
tmp17 = tl.load(in_ptr6 + (x2), xmask)
tmp19 = tl.load(in_ptr7 + (x2), xmask)
tmp23 = tl.load(in_ptr8 + (x2), xmask)
tmp25 = tl.load(in_ptr9 + (x2), xmask)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tmp4 + tmp9
tmp12 = tmp10 + tmp11
tmp14 = tmp13 + tmp6
tmp15 = triton_helpers.maximum(tmp8, tmp14)
tmp16 = tmp12 + tmp15
tmp18 = tmp16 + tmp17
tmp20 = tmp19 + tmp6
tmp21 = triton_helpers.maximum(tmp8, tmp20)
tmp22 = tmp18 + tmp21
tmp24 = tmp22 + tmp23
tmp26 = tmp25 + tmp6
tmp27 = triton_helpers.maximum(tmp8, tmp26)
tmp28 = tmp24 + tmp27
tmp29 = tmp27 <= tmp1
tmp30 = tmp21 <= tmp1
tmp31 = tmp15 <= tmp1
tmp32 = tmp9 <= tmp1
tmp33 = tmp23 <= tmp1
tmp34 = tmp17 <= tmp1
tmp35 = tmp11 <= tmp1
tmp36 = tmp3 <= tmp1
tmp37 = tmp0 <= tmp1
tl.store(in_out_ptr0 + (x2), tmp28, xmask)
tl.store(out_ptr0 + (x2), tmp29, xmask)
tl.store(out_ptr1 + (x2), tmp30, xmask)
tl.store(out_ptr2 + (x2), tmp31, xmask)
tl.store(out_ptr3 + (x2), tmp32, xmask)
tl.store(out_ptr4 + (x2), tmp33, xmask)
tl.store(out_ptr5 + (x2), tmp34, xmask)
tl.store(out_ptr6 + (x2), tmp35, xmask)
tl.store(out_ptr7 + (x2), tmp36, xmask)
tl.store(out_ptr8 + (x2), tmp37, xmask)
''', device_str='cuda')
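# Hedged sketch of the kernel above (illustrative names; summation order is
# commutative, so direct and recomputed branches are grouped here): the second
# residual stream accumulates tensors that arrive already activated alongside
# branches recomputed as bias-add-ReLU with one shared bias, emitting a
# (value <= 0) mask per branch for the backward pass.
def _sketch_add_relu_threshold_backward_27(relu0, activated, pre_acts, bias):
    total = relu0.clone()
    masks = [relu0 <= 0]
    for y in activated:  # already ReLU outputs; added without reactivation
        total = total + y
        masks.append(y <= 0)
    for pre in pre_acts:  # branches recomputed here
        y = torch.relu(pre + bias)
        total = total + y
        masks.append(y <= 0)
    return total, masks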
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (8, 4), (4, 1))
assert_size_stride(primals_8, (8, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 8), (8, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (8, 4), (4, 1))
assert_size_stride(primals_14, (8, ), (1, ))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (4, 8), (8, 1))
assert_size_stride(primals_18, (4, ), (1, ))
assert_size_stride(primals_19, (4, 4), (4, 1))
assert_size_stride(primals_20, (4, ), (1, ))
assert_size_stride(primals_21, (4, 4), (4, 1))
assert_size_stride(primals_22, (4, ), (1, ))
assert_size_stride(primals_23, (12, 4), (4, 1))
assert_size_stride(primals_24, (12, ), (1, ))
assert_size_stride(primals_25, (12, 4), (4, 1))
assert_size_stride(primals_26, (12, ), (1, ))
assert_size_stride(primals_27, (4, 4), (4, 1))
assert_size_stride(primals_28, (4, ), (1, ))
assert_size_stride(primals_29, (4, 4), (4, 1))
assert_size_stride(primals_30, (4, ), (1, ))
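    # The asserts above pin down the exact (shape, stride) layout this graph
    # was specialized for; a mismatched input fails fast before any kernel
    # launches. A hedged usage sketch (the weight/bias/input grouping is an
    # assumption; only the order and the asserted layouts are guaranteed):
    #   out = call([primals_1, primals_2, ..., primals_30])  # 30 CUDA tensors
    # Note that `call` clears the argument list it is handed (`args.clear()`).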
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
buf55 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf54 = reinterpret_tensor(buf55, (4, 4, 4), (32, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_1, cat_tgt_1], Original ATen: [aten.relu, aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_relu_0.run(buf2, primals_2, buf54, 64, grid=grid(64), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 8), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
buf28 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf27 = reinterpret_tensor(buf28, (4, 4, 4), (32, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_3, cat_tgt], Original ATen: [aten.relu, aten.cat]
triton_poi_fused_cat_relu_0.run(buf4, primals_5, buf27, 64, grid=grid(64), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0); del buf5 # reuse
buf500 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf6, primals_10, buf500, 64, grid=grid(64), stream=stream0)
buf7 = reinterpret_tensor(buf3, (4, 4, 8), (32, 8, 1), 0); del buf3 # reuse
buf501 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf7, primals_8, buf501, 128, grid=grid(128), stream=stream0)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf6, buf8, 16, grid=grid(16), stream=stream0)
buf9 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
triton_poi_fused_bmm_4.run(buf7, buf9, 16, grid=grid(16), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(buf8, buf9, out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf10, buf11, 64, grid=grid(64), stream=stream0)
buf12 = reinterpret_tensor(buf9, (4, 4, 1), (4, 1, 16), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf6, buf12, 16, grid=grid(16), stream=stream0)
buf13 = reinterpret_tensor(buf8, (4, 1, 4), (4, 16, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf7, buf13, 16, grid=grid(16), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf12, buf13, out=buf14)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf14, buf15, 64, grid=grid(64), stream=stream0)
buf16 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt_update_1], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf11, buf7, buf15, buf16, 32, grid=grid(32), stream=stream0)
buf17 = reinterpret_tensor(buf13, (4, 4, 1), (4, 1, 16), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf6, buf17, 16, grid=grid(16), stream=stream0)
buf18 = reinterpret_tensor(buf12, (4, 1, 4), (4, 16, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
triton_poi_fused_bmm_10.run(buf7, buf18, 16, grid=grid(16), stream=stream0)
buf19 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
extern_kernels.bmm(buf17, buf18, out=buf19)
buf20 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf19, buf20, 64, grid=grid(64), stream=stream0)
buf21 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt_update_2], Original ATen: [aten.cat]
triton_poi_fused_cat_11.run(buf16, buf20, buf7, buf21, 48, grid=grid(48), stream=stream0)
buf22 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf6, buf22, 16, grid=grid(16), stream=stream0)
buf23 = reinterpret_tensor(buf17, (4, 1, 4), (4, 16, 1), 0); del buf17 # reuse
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
triton_poi_fused_bmm_13.run(buf7, buf23, 16, grid=grid(16), stream=stream0)
buf24 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
extern_kernels.bmm(buf22, buf23, out=buf24)
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf24, buf25, 64, grid=grid(64), stream=stream0)
buf26 = reinterpret_tensor(buf28, (4, 4, 4), (32, 8, 1), 4) # alias
# Topologically Sorted Source Nodes: [tgt_update_3], Original ATen: [aten.cat]
triton_poi_fused_cat_14.run(buf21, buf25, buf7, buf26, 64, grid=grid(64), stream=stream0)
buf29 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0); del buf25 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf28, (16, 8), (8, 1), 0), reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), out=buf29)
buf30 = reinterpret_tensor(buf29, (4, 4, 4), (16, 4, 1), 0); del buf29 # reuse
buf64 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_9, add_1], Original ATen: [aten.relu, aten.add]
triton_poi_fused_add_relu_15.run(buf30, primals_12, buf4, buf64, 64, grid=grid(64), stream=stream0)
buf31 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf30, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 8), (1, 4), 0), out=buf31)
buf32 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf32)
buf33 = reinterpret_tensor(buf32, (4, 4, 4), (16, 4, 1), 0); del buf32 # reuse
buf497 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf33, primals_16, buf497, 64, grid=grid(64), stream=stream0)
buf34 = reinterpret_tensor(buf31, (4, 4, 8), (32, 8, 1), 0); del buf31 # reuse
buf498 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf34, primals_14, buf498, 128, grid=grid(128), stream=stream0)
buf35 = reinterpret_tensor(buf23, (4, 4, 1), (4, 1, 16), 0); del buf23 # reuse
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf33, buf35, 16, grid=grid(16), stream=stream0)
buf36 = reinterpret_tensor(buf22, (4, 1, 4), (4, 16, 1), 0); del buf22 # reuse
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
triton_poi_fused_bmm_4.run(buf34, buf36, 16, grid=grid(16), stream=stream0)
buf37 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
extern_kernels.bmm(buf35, buf36, out=buf37)
buf38 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_4], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf37, buf38, 64, grid=grid(64), stream=stream0)
buf39 = reinterpret_tensor(buf36, (4, 4, 1), (4, 1, 16), 0); del buf36 # reuse
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf33, buf39, 16, grid=grid(16), stream=stream0)
buf40 = reinterpret_tensor(buf35, (4, 1, 4), (4, 16, 1), 0); del buf35 # reuse
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf34, buf40, 16, grid=grid(16), stream=stream0)
buf41 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
extern_kernels.bmm(buf39, buf40, out=buf41)
buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_5], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf41, buf42, 64, grid=grid(64), stream=stream0)
buf43 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [tgt_update_5], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf38, buf34, buf42, buf43, 32, grid=grid(32), stream=stream0)
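# NOTE: the cat_8 / cat_11 / cat_14 kernels grow the per-head update tensor
# one column at a time (4x4x2 -> 4x4x3 -> 4x4x4); each launch appears to fuse
# the attention-weighted reduction of the matching value slice of buf34 with
# the concatenation onto the running tgt_update, avoiding extra intermediates.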
buf44 = reinterpret_tensor(buf40, (4, 4, 1), (4, 1, 16), 0); del buf40 # reuse
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf33, buf44, 16, grid=grid(16), stream=stream0)
buf45 = reinterpret_tensor(buf39, (4, 1, 4), (4, 16, 1), 0); del buf39 # reuse
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
triton_poi_fused_bmm_10.run(buf34, buf45, 16, grid=grid(16), stream=stream0)
buf46 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
extern_kernels.bmm(buf44, buf45, out=buf46)
buf47 = buf38; del buf38 # reuse
# Topologically Sorted Source Nodes: [softmax_6], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf46, buf47, 64, grid=grid(64), stream=stream0)
buf48 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [tgt_update_6], Original ATen: [aten.cat]
triton_poi_fused_cat_11.run(buf43, buf47, buf34, buf48, 48, grid=grid(48), stream=stream0)
buf49 = reinterpret_tensor(buf45, (4, 4, 1), (4, 1, 16), 0); del buf45 # reuse
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf33, buf49, 16, grid=grid(16), stream=stream0)
buf50 = reinterpret_tensor(buf44, (4, 1, 4), (4, 16, 1), 0); del buf44 # reuse
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
triton_poi_fused_bmm_13.run(buf34, buf50, 16, grid=grid(16), stream=stream0)
buf51 = buf47; del buf47 # reuse
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
extern_kernels.bmm(buf49, buf50, out=buf51)
buf52 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_7], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf51, buf52, 64, grid=grid(64), stream=stream0)
buf53 = reinterpret_tensor(buf55, (4, 4, 4), (32, 8, 1), 4) # alias
# Topologically Sorted Source Nodes: [tgt_update_7], Original ATen: [aten.cat]
triton_poi_fused_cat_14.run(buf48, buf52, buf34, buf53, 64, grid=grid(64), stream=stream0)
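# NOTE: buf53 is an aliased view into the second half of buf55 (offset 4 of
# an 8-wide buffer), so the final head concatenation is written straight into
# the cat target and the following (16, 8) x (8, 4) projection needs no copy.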
buf56 = reinterpret_tensor(buf52, (16, 4), (4, 1), 0); del buf52 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf55, (16, 8), (8, 1), 0), reinterpret_tensor(primals_17, (8, 4), (1, 8), 0), out=buf56)
buf57 = reinterpret_tensor(buf50, (4, 4), (4, 1), 0); del buf50 # reuse
buf58 = buf57; del buf57 # reuse
# Topologically Sorted Source Nodes: [x_15, add, sum_9, v_mean], Original ATen: [aten.relu, aten.add, aten.sum, aten.div]
triton_poi_fused_add_div_relu_sum_16.run(buf58, buf56, primals_18, buf2, 16, grid=grid(16), stream=stream0)
buf59 = reinterpret_tensor(buf49, (4, 4), (4, 1), 0); del buf49 # reuse
# Topologically Sorted Source Nodes: [add_1, sum_10, q_mean], Original ATen: [aten.add, aten.sum, aten.div]
triton_poi_fused_add_div_sum_17.run(buf30, buf4, buf59, 16, grid=grid(16), stream=stream0)
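# NOTE: the fused add/div/sum kernels appear to implement mean pooling over
# the sequence axis: v_mean ~ sum(relu(x + bias) + residual) and
# q_mean ~ sum(x + residual), each divided by the pooled length of 4,
# collapsing the (4, 4, 4) activations to per-sample (4, 4) vectors.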
buf60 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_20, buf58, reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf60)
buf61 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_22, buf59, reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf61)
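# NOTE: extern_kernels.addmm(bias, x, weight_t, alpha=1, beta=1) is the usual
# lowering of torch.nn.Linear, so x_16 / x_17 are plain linear maps applied to
# the pooled v_mean / q_mean vectors; in eager terms this is roughly
# buf60 = primals_20 + buf58 @ primals_19.T (and likewise for buf61).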
buf62 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_15, add], Original ATen: [aten.relu, aten.add]
triton_poi_fused_add_relu_18.run(buf56, primals_18, buf2, buf62, 64, grid=grid(64), stream=stream0)
buf63 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf62, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf63)
buf65 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf64, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 12), (1, 4), 0), out=buf65)
buf66 = reinterpret_tensor(buf63, (4, 4, 12), (48, 12, 1), 0); del buf63 # reuse
buf495 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_19.run(buf66, primals_24, buf495, 192, grid=grid(192), stream=stream0)
buf67 = reinterpret_tensor(buf65, (4, 4, 12), (48, 12, 1), 0); del buf65 # reuse
buf494 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_21], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_19.run(buf67, primals_26, buf494, 192, grid=grid(192), stream=stream0)
buf68 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf69 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf80 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_2, gated_v_query, gated_v_key, gated_v_val], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_20.run(buf61, buf66, buf68, buf69, buf80, 64, grid=grid(64), stream=stream0)
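# NOTE: the 4 -> 12 projections (primals_23/primals_25) appear to produce
# three gate groups of width 4 each; add_mul_20 combines them with the
# broadcast linear output (buf61) to emit gated query / key / value tensors
# in a single pass (buf68/buf69/buf80), matching the gated_v_* node names.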
buf70 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf68, buf70, 16, grid=grid(16), stream=stream0)
buf71 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf69, buf71, 16, grid=grid(16), stream=stream0)
buf72 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.bmm]
extern_kernels.bmm(buf70, buf71, out=buf72)
buf73 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf74 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf81 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_5, gated_q_query, gated_q_key, gated_q_val], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_20.run(buf60, buf67, buf73, buf74, buf81, 64, grid=grid(64), stream=stream0)
buf75 = reinterpret_tensor(buf71, (4, 4, 1), (4, 1, 16), 0); del buf71 # reuse
# Topologically Sorted Source Nodes: [matmul_9], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf73, buf75, 16, grid=grid(16), stream=stream0)
buf76 = reinterpret_tensor(buf70, (4, 1, 4), (4, 16, 1), 0); del buf70 # reuse
# Topologically Sorted Source Nodes: [matmul_9], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf74, buf76, 16, grid=grid(16), stream=stream0)
buf77 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_9], Original ATen: [aten.bmm]
extern_kernels.bmm(buf75, buf76, out=buf77)
buf78 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_8], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf72, buf78, 64, grid=grid(64), stream=stream0)
buf79 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_9], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf77, buf79, 64, grid=grid(64), stream=stream0)
buf82 = reinterpret_tensor(buf76, (4, 4, 1), (4, 1, 16), 0); del buf76 # reuse
# Topologically Sorted Source Nodes: [matmul_10], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf68, buf82, 16, grid=grid(16), stream=stream0)
buf83 = reinterpret_tensor(buf75, (4, 1, 4), (4, 16, 1), 0); del buf75 # reuse
# Topologically Sorted Source Nodes: [matmul_10], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf69, buf83, 16, grid=grid(16), stream=stream0)
buf84 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_10], Original ATen: [aten.bmm]
extern_kernels.bmm(buf82, buf83, out=buf84)
buf85 = reinterpret_tensor(buf83, (4, 4, 1), (4, 1, 16), 0); del buf83 # reuse
# Topologically Sorted Source Nodes: [matmul_11], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf73, buf85, 16, grid=grid(16), stream=stream0)
buf86 = reinterpret_tensor(buf82, (4, 1, 4), (4, 16, 1), 0); del buf82 # reuse
# Topologically Sorted Source Nodes: [matmul_11], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf74, buf86, 16, grid=grid(16), stream=stream0)
buf87 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_11], Original ATen: [aten.bmm]
extern_kernels.bmm(buf85, buf86, out=buf87)
buf88 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_10], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf84, buf88, 64, grid=grid(64), stream=stream0)
buf89 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_11], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf87, buf89, 64, grid=grid(64), stream=stream0)
buf90 = buf43; del buf43 # reuse
# Topologically Sorted Source Nodes: [v_update_1], Original ATen: [aten.cat]
triton_poi_fused_cat_21.run(buf78, buf80, buf88, buf90, 32, grid=grid(32), stream=stream0)
buf91 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_update_1], Original ATen: [aten.cat]
triton_poi_fused_cat_21.run(buf79, buf81, buf89, buf91, 32, grid=grid(32), stream=stream0)
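# NOTE: from here the head-accumulation pattern above runs as two parallel
# streams, one for the value path (v_update_*, buf90) and one for the query
# path (q_update_*, buf91), sharing the same cat_21/cat_22 kernels.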
buf92 = reinterpret_tensor(buf86, (4, 4, 1), (4, 1, 16), 0); del buf86 # reuse
# Topologically Sorted Source Nodes: [matmul_12], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf68, buf92, 16, grid=grid(16), stream=stream0)
buf93 = reinterpret_tensor(buf85, (4, 1, 4), (4, 16, 1), 0); del buf85 # reuse
# Topologically Sorted Source Nodes: [matmul_12], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf69, buf93, 16, grid=grid(16), stream=stream0)
buf94 = buf89; del buf89 # reuse
# Topologically Sorted Source Nodes: [matmul_12], Original ATen: [aten.bmm]
extern_kernels.bmm(buf92, buf93, out=buf94)
buf95 = reinterpret_tensor(buf93, (4, 4, 1), (4, 1, 16), 0); del buf93 # reuse
# Topologically Sorted Source Nodes: [matmul_13], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf73, buf95, 16, grid=grid(16), stream=stream0)
buf96 = reinterpret_tensor(buf92, (4, 1, 4), (4, 16, 1), 0); del buf92 # reuse
# Topologically Sorted Source Nodes: [matmul_13], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf74, buf96, 16, grid=grid(16), stream=stream0)
buf97 = buf79; del buf79 # reuse
# Topologically Sorted Source Nodes: [matmul_13], Original ATen: [aten.bmm]
extern_kernels.bmm(buf95, buf96, out=buf97)
buf98 = buf88; del buf88 # reuse
# Topologically Sorted Source Nodes: [softmax_12], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf94, buf98, 64, grid=grid(64), stream=stream0)
buf99 = buf78; del buf78 # reuse
# Topologically Sorted Source Nodes: [softmax_13], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf97, buf99, 64, grid=grid(64), stream=stream0)
buf100 = buf48; del buf48 # reuse
# Topologically Sorted Source Nodes: [v_update_2], Original ATen: [aten.cat]
triton_poi_fused_cat_22.run(buf90, buf98, buf80, buf100, 48, grid=grid(48), stream=stream0)
buf101 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_update_2], Original ATen: [aten.cat]
triton_poi_fused_cat_22.run(buf91, buf99, buf81, buf101, 48, grid=grid(48), stream=stream0)
buf102 = reinterpret_tensor(buf96, (4, 4, 1), (4, 1, 16), 0); del buf96 # reuse
# Topologically Sorted Source Nodes: [matmul_14], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf68, buf102, 16, grid=grid(16), stream=stream0)
buf103 = reinterpret_tensor(buf95, (4, 1, 4), (4, 16, 1), 0); del buf95 # reuse
# Topologically Sorted Source Nodes: [matmul_14], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf69, buf103, 16, grid=grid(16), stream=stream0)
buf104 = buf99; del buf99 # reuse
# Topologically Sorted Source Nodes: [matmul_14], Original ATen: [aten.bmm]
extern_kernels.bmm(buf102, buf103, out=buf104)
buf105 = reinterpret_tensor(buf103, (4, 4, 1), (4, 1, 16), 0); del buf103 # reuse
# Topologically Sorted Source Nodes: [matmul_15], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf73, buf105, 16, grid=grid(16), stream=stream0)
buf106 = reinterpret_tensor(buf102, (4, 1, 4), (4, 16, 1), 0); del buf102 # reuse
# Topologically Sorted Source Nodes: [matmul_15], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf74, buf106, 16, grid=grid(16), stream=stream0)
buf107 = buf98; del buf98 # reuse
# Topologically Sorted Source Nodes: [matmul_15], Original ATen: [aten.bmm]
extern_kernels.bmm(buf105, buf106, out=buf107)
buf108 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_14], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf104, buf108, 64, grid=grid(64), stream=stream0)
buf109 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_15], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf107, buf109, 64, grid=grid(64), stream=stream0)
buf110 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf112 = buf110; del buf110 # reuse
# Topologically Sorted Source Nodes: [v_update_3, add_8], Original ATen: [aten.cat, aten.add]
triton_poi_fused_add_cat_23.run(buf112, buf100, buf108, buf80, buf62, 64, grid=grid(64), stream=stream0)
buf111 = buf108; del buf108 # reuse
buf114 = buf111; del buf111 # reuse
# Topologically Sorted Source Nodes: [q_update_3, add_9], Original ATen: [aten.cat, aten.add]
triton_poi_fused_add_cat_23.run(buf114, buf101, buf109, buf81, buf64, 64, grid=grid(64), stream=stream0)
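# NOTE: add_cat_23 seems to fuse the last head concatenation with the residual
# additions (add_8/add_9): the fourth update column is appended and the earlier
# activations (buf62/buf64) are added in the same kernel, yielding the block
# outputs buf112 and buf114 without a separate elementwise-add pass.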
buf113 = reinterpret_tensor(buf109, (16, 4), (4, 1), 0); del buf109 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf112, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf113)
buf115 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf114, (16, 4), (4, 1), 0), reinterpret_tensor(primals_29, (4, 4), (1, 4), 0), out=buf115)
buf116 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf169 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf168 = reinterpret_tensor(buf169, (4, 4, 4), (32, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_15, x_23, add_10, add_11, cat_tgt_3], Original ATen: [aten.relu, aten.add, aten.cat]
triton_poi_fused_add_cat_relu_24.run(buf113, primals_28, buf56, primals_18, buf2, buf116, buf168, 64, grid=grid(64), stream=stream0)
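# NOTE: this kernel both closes the first fusion block (relu + residual adds)
# and writes the first half of the next block's concatenated input (cat_tgt_3,
# aliased into buf169). The following stretch replays the block with the same
# parameter tensors (primals_7..primals_30 reappear below), i.e. the module
# appears to apply one weight-shared block iteratively rather than a stack of
# independently parameterized layers.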
buf117 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf116, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 8), (1, 4), 0), out=buf117)
buf118 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf142 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf141 = reinterpret_tensor(buf142, (4, 4, 4), (32, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_25, add_12, add_13, cat_tgt_2], Original ATen: [aten.relu, aten.add, aten.cat]
triton_poi_fused_add_cat_relu_25.run(buf115, primals_30, buf30, buf4, buf118, buf141, 64, grid=grid(64), stream=stream0)
buf119 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf118, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf119)
buf120 = reinterpret_tensor(buf119, (4, 4, 4), (16, 4, 1), 0); del buf119 # reuse
buf490 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_29], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf120, primals_10, buf490, 64, grid=grid(64), stream=stream0)
buf121 = reinterpret_tensor(buf117, (4, 4, 8), (32, 8, 1), 0); del buf117 # reuse
buf491 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf121, primals_8, buf491, 128, grid=grid(128), stream=stream0)
buf122 = reinterpret_tensor(buf106, (4, 4, 1), (4, 1, 16), 0); del buf106 # reuse
# Topologically Sorted Source Nodes: [matmul_16], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf120, buf122, 16, grid=grid(16), stream=stream0)
buf123 = reinterpret_tensor(buf105, (4, 1, 4), (4, 16, 1), 0); del buf105 # reuse
# Topologically Sorted Source Nodes: [matmul_16], Original ATen: [aten.bmm]
triton_poi_fused_bmm_4.run(buf121, buf123, 16, grid=grid(16), stream=stream0)
buf124 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_16], Original ATen: [aten.bmm]
extern_kernels.bmm(buf122, buf123, out=buf124)
buf125 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_16], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf124, buf125, 64, grid=grid(64), stream=stream0)
buf126 = reinterpret_tensor(buf123, (4, 4, 1), (4, 1, 16), 0); del buf123 # reuse
# Topologically Sorted Source Nodes: [matmul_17], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf120, buf126, 16, grid=grid(16), stream=stream0)
buf127 = reinterpret_tensor(buf122, (4, 1, 4), (4, 16, 1), 0); del buf122 # reuse
# Topologically Sorted Source Nodes: [matmul_17], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf121, buf127, 16, grid=grid(16), stream=stream0)
buf128 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_17], Original ATen: [aten.bmm]
extern_kernels.bmm(buf126, buf127, out=buf128)
buf129 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_17], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf128, buf129, 64, grid=grid(64), stream=stream0)
buf130 = buf91; del buf91 # reuse
# Topologically Sorted Source Nodes: [tgt_update_9], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf125, buf121, buf129, buf130, 32, grid=grid(32), stream=stream0)
buf131 = reinterpret_tensor(buf127, (4, 4, 1), (4, 1, 16), 0); del buf127 # reuse
# Topologically Sorted Source Nodes: [matmul_18], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf120, buf131, 16, grid=grid(16), stream=stream0)
buf132 = reinterpret_tensor(buf126, (4, 1, 4), (4, 16, 1), 0); del buf126 # reuse
# Topologically Sorted Source Nodes: [matmul_18], Original ATen: [aten.bmm]
triton_poi_fused_bmm_10.run(buf121, buf132, 16, grid=grid(16), stream=stream0)
buf133 = buf129; del buf129 # reuse
# Topologically Sorted Source Nodes: [matmul_18], Original ATen: [aten.bmm]
extern_kernels.bmm(buf131, buf132, out=buf133)
buf134 = buf125; del buf125 # reuse
# Topologically Sorted Source Nodes: [softmax_18], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf133, buf134, 64, grid=grid(64), stream=stream0)
buf135 = buf101; del buf101 # reuse
# Topologically Sorted Source Nodes: [tgt_update_10], Original ATen: [aten.cat]
triton_poi_fused_cat_11.run(buf130, buf134, buf121, buf135, 48, grid=grid(48), stream=stream0)
buf136 = reinterpret_tensor(buf132, (4, 4, 1), (4, 1, 16), 0); del buf132 # reuse
# Topologically Sorted Source Nodes: [matmul_19], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf120, buf136, 16, grid=grid(16), stream=stream0)
buf137 = reinterpret_tensor(buf131, (4, 1, 4), (4, 16, 1), 0); del buf131 # reuse
# Topologically Sorted Source Nodes: [matmul_19], Original ATen: [aten.bmm]
triton_poi_fused_bmm_13.run(buf121, buf137, 16, grid=grid(16), stream=stream0)
buf138 = buf134; del buf134 # reuse
# Topologically Sorted Source Nodes: [matmul_19], Original ATen: [aten.bmm]
extern_kernels.bmm(buf136, buf137, out=buf138)
buf139 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_19], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf138, buf139, 64, grid=grid(64), stream=stream0)
buf140 = reinterpret_tensor(buf142, (4, 4, 4), (32, 8, 1), 4) # alias
# Topologically Sorted Source Nodes: [tgt_update_11], Original ATen: [aten.cat]
triton_poi_fused_cat_14.run(buf135, buf139, buf121, buf140, 64, grid=grid(64), stream=stream0)
buf143 = reinterpret_tensor(buf139, (16, 4), (4, 1), 0); del buf139 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf142, (16, 8), (8, 1), 0), reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), out=buf143)
buf144 = reinterpret_tensor(buf143, (4, 4, 4), (16, 4, 1), 0); del buf143 # reuse
buf178 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_31, add_15], Original ATen: [aten.relu, aten.add]
triton_poi_fused_add_relu_15.run(buf144, primals_12, buf118, buf178, 64, grid=grid(64), stream=stream0)
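# NOTE: buf178 plays the role buf64 played in the first iteration: it carries
# this iteration's residual sum forward to the 4 -> 12 gating projection
# (primals_25, below) and to the fused add/cat that closes the iteration.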
buf145 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf144, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 8), (1, 4), 0), out=buf145)
buf146 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf116, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf146)
buf147 = reinterpret_tensor(buf146, (4, 4, 4), (16, 4, 1), 0); del buf146 # reuse
buf487 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_35], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf147, primals_16, buf487, 64, grid=grid(64), stream=stream0)
buf148 = reinterpret_tensor(buf145, (4, 4, 8), (32, 8, 1), 0); del buf145 # reuse
buf488 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_33], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf148, primals_14, buf488, 128, grid=grid(128), stream=stream0)
buf149 = reinterpret_tensor(buf137, (4, 4, 1), (4, 1, 16), 0); del buf137 # reuse
# Topologically Sorted Source Nodes: [matmul_20], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf147, buf149, 16, grid=grid(16), stream=stream0)
buf150 = reinterpret_tensor(buf136, (4, 1, 4), (4, 16, 1), 0); del buf136 # reuse
# Topologically Sorted Source Nodes: [matmul_20], Original ATen: [aten.bmm]
triton_poi_fused_bmm_4.run(buf148, buf150, 16, grid=grid(16), stream=stream0)
buf151 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_20], Original ATen: [aten.bmm]
extern_kernels.bmm(buf149, buf150, out=buf151)
buf152 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_20], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf151, buf152, 64, grid=grid(64), stream=stream0)
buf153 = reinterpret_tensor(buf150, (4, 4, 1), (4, 1, 16), 0); del buf150 # reuse
# Topologically Sorted Source Nodes: [matmul_21], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf147, buf153, 16, grid=grid(16), stream=stream0)
buf154 = reinterpret_tensor(buf149, (4, 1, 4), (4, 16, 1), 0); del buf149 # reuse
# Topologically Sorted Source Nodes: [matmul_21], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf148, buf154, 16, grid=grid(16), stream=stream0)
buf155 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_21], Original ATen: [aten.bmm]
extern_kernels.bmm(buf153, buf154, out=buf155)
buf156 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_21], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf155, buf156, 64, grid=grid(64), stream=stream0)
buf157 = buf130; del buf130 # reuse
# Topologically Sorted Source Nodes: [tgt_update_13], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf152, buf148, buf156, buf157, 32, grid=grid(32), stream=stream0)
buf158 = reinterpret_tensor(buf154, (4, 4, 1), (4, 1, 16), 0); del buf154 # reuse
# Topologically Sorted Source Nodes: [matmul_22], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf147, buf158, 16, grid=grid(16), stream=stream0)
buf159 = reinterpret_tensor(buf153, (4, 1, 4), (4, 16, 1), 0); del buf153 # reuse
# Topologically Sorted Source Nodes: [matmul_22], Original ATen: [aten.bmm]
triton_poi_fused_bmm_10.run(buf148, buf159, 16, grid=grid(16), stream=stream0)
buf160 = buf156; del buf156 # reuse
# Topologically Sorted Source Nodes: [matmul_22], Original ATen: [aten.bmm]
extern_kernels.bmm(buf158, buf159, out=buf160)
buf161 = buf152; del buf152 # reuse
# Topologically Sorted Source Nodes: [softmax_22], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf160, buf161, 64, grid=grid(64), stream=stream0)
buf162 = buf135; del buf135 # reuse
# Topologically Sorted Source Nodes: [tgt_update_14], Original ATen: [aten.cat]
triton_poi_fused_cat_11.run(buf157, buf161, buf148, buf162, 48, grid=grid(48), stream=stream0)
buf163 = reinterpret_tensor(buf159, (4, 4, 1), (4, 1, 16), 0); del buf159 # reuse
# Topologically Sorted Source Nodes: [matmul_23], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf147, buf163, 16, grid=grid(16), stream=stream0)
buf164 = reinterpret_tensor(buf158, (4, 1, 4), (4, 16, 1), 0); del buf158 # reuse
# Topologically Sorted Source Nodes: [matmul_23], Original ATen: [aten.bmm]
triton_poi_fused_bmm_13.run(buf148, buf164, 16, grid=grid(16), stream=stream0)
buf165 = buf161; del buf161 # reuse
# Topologically Sorted Source Nodes: [matmul_23], Original ATen: [aten.bmm]
extern_kernels.bmm(buf163, buf164, out=buf165)
buf166 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_23], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf165, buf166, 64, grid=grid(64), stream=stream0)
buf167 = reinterpret_tensor(buf169, (4, 4, 4), (32, 8, 1), 4) # alias
# Topologically Sorted Source Nodes: [tgt_update_15], Original ATen: [aten.cat]
triton_poi_fused_cat_14.run(buf162, buf166, buf148, buf167, 64, grid=grid(64), stream=stream0)
buf170 = reinterpret_tensor(buf166, (16, 4), (4, 1), 0); del buf166 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf169, (16, 8), (8, 1), 0), reinterpret_tensor(primals_17, (8, 4), (1, 8), 0), out=buf170)
buf171 = reinterpret_tensor(buf164, (4, 4), (4, 1), 0); del buf164 # reuse
buf172 = buf171; del buf171 # reuse
# Topologically Sorted Source Nodes: [x_37, add_14, sum_27, v_mean_1], Original ATen: [aten.relu, aten.add, aten.sum, aten.div]
triton_poi_fused_add_div_relu_sum_16.run(buf172, buf170, primals_18, buf116, 16, grid=grid(16), stream=stream0)
buf173 = reinterpret_tensor(buf163, (4, 4), (4, 1), 0); del buf163 # reuse
# Topologically Sorted Source Nodes: [add_15, sum_28, q_mean_1], Original ATen: [aten.add, aten.sum, aten.div]
triton_poi_fused_add_div_sum_17.run(buf144, buf118, buf173, 16, grid=grid(16), stream=stream0)
buf174 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_38], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_20, buf172, reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf174)
buf175 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_39], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_22, buf173, reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf175)
buf176 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_37, add_14], Original ATen: [aten.relu, aten.add]
triton_poi_fused_add_relu_18.run(buf170, primals_18, buf116, buf176, 64, grid=grid(64), stream=stream0)
buf177 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf176, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf177)
buf179 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf178, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 12), (1, 4), 0), out=buf179)
buf180 = reinterpret_tensor(buf177, (4, 4, 12), (48, 12, 1), 0); del buf177 # reuse
buf485 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_41], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_19.run(buf180, primals_24, buf485, 192, grid=grid(192), stream=stream0)
buf181 = reinterpret_tensor(buf179, (4, 4, 12), (48, 12, 1), 0); del buf179 # reuse
buf484 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_19.run(buf181, primals_26, buf484, 192, grid=grid(192), stream=stream0)
buf182 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf183 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf194 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_16, gated_v_query_1, gated_v_key_1, gated_v_val_1], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_20.run(buf175, buf180, buf182, buf183, buf194, 64, grid=grid(64), stream=stream0)
buf184 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [matmul_24], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf182, buf184, 16, grid=grid(16), stream=stream0)
buf185 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_24], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf183, buf185, 16, grid=grid(16), stream=stream0)
buf186 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_24], Original ATen: [aten.bmm]
extern_kernels.bmm(buf184, buf185, out=buf186)
buf187 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf188 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf195 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_19, gated_q_query_1, gated_q_key_1, gated_q_val_1], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_20.run(buf174, buf181, buf187, buf188, buf195, 64, grid=grid(64), stream=stream0)
buf189 = reinterpret_tensor(buf185, (4, 4, 1), (4, 1, 16), 0); del buf185 # reuse
# Topologically Sorted Source Nodes: [matmul_25], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf187, buf189, 16, grid=grid(16), stream=stream0)
buf190 = reinterpret_tensor(buf184, (4, 1, 4), (4, 16, 1), 0); del buf184 # reuse
# Topologically Sorted Source Nodes: [matmul_25], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf188, buf190, 16, grid=grid(16), stream=stream0)
buf191 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_25], Original ATen: [aten.bmm]
extern_kernels.bmm(buf189, buf190, out=buf191)
buf192 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_24], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf186, buf192, 64, grid=grid(64), stream=stream0)
buf193 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_25], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf191, buf193, 64, grid=grid(64), stream=stream0)
buf196 = reinterpret_tensor(buf190, (4, 4, 1), (4, 1, 16), 0); del buf190 # reuse
# Topologically Sorted Source Nodes: [matmul_26], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf182, buf196, 16, grid=grid(16), stream=stream0)
buf197 = reinterpret_tensor(buf189, (4, 1, 4), (4, 16, 1), 0); del buf189 # reuse
# Topologically Sorted Source Nodes: [matmul_26], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf183, buf197, 16, grid=grid(16), stream=stream0)
buf198 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_26], Original ATen: [aten.bmm]
extern_kernels.bmm(buf196, buf197, out=buf198)
buf199 = reinterpret_tensor(buf197, (4, 4, 1), (4, 1, 16), 0); del buf197 # reuse
# Topologically Sorted Source Nodes: [matmul_27], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf187, buf199, 16, grid=grid(16), stream=stream0)
buf200 = reinterpret_tensor(buf196, (4, 1, 4), (4, 16, 1), 0); del buf196 # reuse
# Topologically Sorted Source Nodes: [matmul_27], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf188, buf200, 16, grid=grid(16), stream=stream0)
buf201 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_27], Original ATen: [aten.bmm]
extern_kernels.bmm(buf199, buf200, out=buf201)
buf202 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_26], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf198, buf202, 64, grid=grid(64), stream=stream0)
buf203 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_27], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf201, buf203, 64, grid=grid(64), stream=stream0)
buf204 = buf157; del buf157 # reuse
# Topologically Sorted Source Nodes: [v_update_5], Original ATen: [aten.cat]
triton_poi_fused_cat_21.run(buf192, buf194, buf202, buf204, 32, grid=grid(32), stream=stream0)
buf205 = buf90; del buf90 # reuse
# Topologically Sorted Source Nodes: [q_update_5], Original ATen: [aten.cat]
triton_poi_fused_cat_21.run(buf193, buf195, buf203, buf205, 32, grid=grid(32), stream=stream0)
buf206 = reinterpret_tensor(buf200, (4, 4, 1), (4, 1, 16), 0); del buf200 # reuse
# Topologically Sorted Source Nodes: [matmul_28], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf182, buf206, 16, grid=grid(16), stream=stream0)
buf207 = reinterpret_tensor(buf199, (4, 1, 4), (4, 16, 1), 0); del buf199 # reuse
# Topologically Sorted Source Nodes: [matmul_28], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf183, buf207, 16, grid=grid(16), stream=stream0)
buf208 = buf203; del buf203 # reuse
# Topologically Sorted Source Nodes: [matmul_28], Original ATen: [aten.bmm]
extern_kernels.bmm(buf206, buf207, out=buf208)
buf209 = reinterpret_tensor(buf207, (4, 4, 1), (4, 1, 16), 0); del buf207 # reuse
# Topologically Sorted Source Nodes: [matmul_29], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf187, buf209, 16, grid=grid(16), stream=stream0)
buf210 = reinterpret_tensor(buf206, (4, 1, 4), (4, 16, 1), 0); del buf206 # reuse
# Topologically Sorted Source Nodes: [matmul_29], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf188, buf210, 16, grid=grid(16), stream=stream0)
buf211 = buf193; del buf193 # reuse
# Topologically Sorted Source Nodes: [matmul_29], Original ATen: [aten.bmm]
extern_kernels.bmm(buf209, buf210, out=buf211)
buf212 = buf202; del buf202 # reuse
# Topologically Sorted Source Nodes: [softmax_28], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf208, buf212, 64, grid=grid(64), stream=stream0)
buf213 = buf192; del buf192 # reuse
# Topologically Sorted Source Nodes: [softmax_29], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf211, buf213, 64, grid=grid(64), stream=stream0)
buf214 = buf162; del buf162 # reuse
# Topologically Sorted Source Nodes: [v_update_6], Original ATen: [aten.cat]
triton_poi_fused_cat_22.run(buf204, buf212, buf194, buf214, 48, grid=grid(48), stream=stream0)
buf215 = buf100; del buf100 # reuse
# Topologically Sorted Source Nodes: [q_update_6], Original ATen: [aten.cat]
triton_poi_fused_cat_22.run(buf205, buf213, buf195, buf215, 48, grid=grid(48), stream=stream0)
buf216 = reinterpret_tensor(buf210, (4, 4, 1), (4, 1, 16), 0); del buf210 # reuse
# Topologically Sorted Source Nodes: [matmul_30], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf182, buf216, 16, grid=grid(16), stream=stream0)
buf217 = reinterpret_tensor(buf209, (4, 1, 4), (4, 16, 1), 0); del buf209 # reuse
# Topologically Sorted Source Nodes: [matmul_30], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf183, buf217, 16, grid=grid(16), stream=stream0)
buf218 = buf213; del buf213 # reuse
# Topologically Sorted Source Nodes: [matmul_30], Original ATen: [aten.bmm]
extern_kernels.bmm(buf216, buf217, out=buf218)
buf219 = reinterpret_tensor(buf217, (4, 4, 1), (4, 1, 16), 0); del buf217 # reuse
# Topologically Sorted Source Nodes: [matmul_31], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf187, buf219, 16, grid=grid(16), stream=stream0)
buf220 = reinterpret_tensor(buf216, (4, 1, 4), (4, 16, 1), 0); del buf216 # reuse
# Topologically Sorted Source Nodes: [matmul_31], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf188, buf220, 16, grid=grid(16), stream=stream0)
buf221 = buf212; del buf212 # reuse
# Topologically Sorted Source Nodes: [matmul_31], Original ATen: [aten.bmm]
extern_kernels.bmm(buf219, buf220, out=buf221)
buf222 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_30], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf218, buf222, 64, grid=grid(64), stream=stream0)
buf223 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_31], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf221, buf223, 64, grid=grid(64), stream=stream0)
buf224 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf226 = buf224; del buf224 # reuse
# Topologically Sorted Source Nodes: [v_update_7, add_22], Original ATen: [aten.cat, aten.add]
triton_poi_fused_add_cat_23.run(buf226, buf214, buf222, buf194, buf176, 64, grid=grid(64), stream=stream0)
buf225 = buf222; del buf222 # reuse
buf228 = buf225; del buf225 # reuse
# Topologically Sorted Source Nodes: [q_update_7, add_23], Original ATen: [aten.cat, aten.add]
triton_poi_fused_add_cat_23.run(buf228, buf215, buf223, buf195, buf178, 64, grid=grid(64), stream=stream0)
buf227 = reinterpret_tensor(buf223, (16, 4), (4, 1), 0); del buf223 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf226, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf227)
buf229 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf228, (16, 4), (4, 1), 0), reinterpret_tensor(primals_29, (4, 4), (1, 4), 0), out=buf229)
buf230 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf283 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf282 = reinterpret_tensor(buf283, (4, 4, 4), (32, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_37, x_45, add_24, add_25, cat_tgt_5], Original ATen: [aten.relu, aten.add, aten.cat]
triton_poi_fused_add_cat_relu_24.run(buf227, primals_28, buf170, primals_18, buf116, buf230, buf282, 64, grid=grid(64), stream=stream0)
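# NOTE: the third repetition of the same weight-shared block starts here
# (cat_tgt_5 / x_45 onward); the buffer-recycling pattern keeps reusing the
# same handful of 4x4x4 scratch tensors across iterations.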
buf231 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf230, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 8), (1, 4), 0), out=buf231)
buf232 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf256 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf255 = reinterpret_tensor(buf256, (4, 4, 4), (32, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_47, add_26, add_27, cat_tgt_4], Original ATen: [aten.relu, aten.add, aten.cat]
triton_poi_fused_add_cat_relu_25.run(buf229, primals_30, buf144, buf118, buf232, buf255, 64, grid=grid(64), stream=stream0)
buf233 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf232, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf233)
buf234 = reinterpret_tensor(buf233, (4, 4, 4), (16, 4, 1), 0); del buf233 # reuse
buf480 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_51], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf234, primals_10, buf480, 64, grid=grid(64), stream=stream0)
buf235 = reinterpret_tensor(buf231, (4, 4, 8), (32, 8, 1), 0); del buf231 # reuse
buf481 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_49], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf235, primals_8, buf481, 128, grid=grid(128), stream=stream0)
buf236 = reinterpret_tensor(buf220, (4, 4, 1), (4, 1, 16), 0); del buf220 # reuse
# Topologically Sorted Source Nodes: [matmul_32], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf234, buf236, 16, grid=grid(16), stream=stream0)
buf237 = reinterpret_tensor(buf219, (4, 1, 4), (4, 16, 1), 0); del buf219 # reuse
# Topologically Sorted Source Nodes: [matmul_32], Original ATen: [aten.bmm]
triton_poi_fused_bmm_4.run(buf235, buf237, 16, grid=grid(16), stream=stream0)
buf238 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_32], Original ATen: [aten.bmm]
extern_kernels.bmm(buf236, buf237, out=buf238)
buf239 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_32], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf238, buf239, 64, grid=grid(64), stream=stream0)
buf240 = reinterpret_tensor(buf237, (4, 4, 1), (4, 1, 16), 0); del buf237 # reuse
# Topologically Sorted Source Nodes: [matmul_33], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf234, buf240, 16, grid=grid(16), stream=stream0)
buf241 = reinterpret_tensor(buf236, (4, 1, 4), (4, 16, 1), 0); del buf236 # reuse
# Topologically Sorted Source Nodes: [matmul_33], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf235, buf241, 16, grid=grid(16), stream=stream0)
buf242 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_33], Original ATen: [aten.bmm]
extern_kernels.bmm(buf240, buf241, out=buf242)
buf243 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_33], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf242, buf243, 64, grid=grid(64), stream=stream0)
buf244 = buf205; del buf205 # reuse
# Topologically Sorted Source Nodes: [tgt_update_17], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf239, buf235, buf243, buf244, 32, grid=grid(32), stream=stream0)
buf245 = reinterpret_tensor(buf241, (4, 4, 1), (4, 1, 16), 0); del buf241 # reuse
# Topologically Sorted Source Nodes: [matmul_34], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf234, buf245, 16, grid=grid(16), stream=stream0)
buf246 = reinterpret_tensor(buf240, (4, 1, 4), (4, 16, 1), 0); del buf240 # reuse
# Topologically Sorted Source Nodes: [matmul_34], Original ATen: [aten.bmm]
triton_poi_fused_bmm_10.run(buf235, buf246, 16, grid=grid(16), stream=stream0)
buf247 = buf243; del buf243 # reuse
# Topologically Sorted Source Nodes: [matmul_34], Original ATen: [aten.bmm]
extern_kernels.bmm(buf245, buf246, out=buf247)
buf248 = buf239; del buf239 # reuse
# Topologically Sorted Source Nodes: [softmax_34], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf247, buf248, 64, grid=grid(64), stream=stream0)
buf249 = buf215; del buf215 # reuse
# Topologically Sorted Source Nodes: [tgt_update_18], Original ATen: [aten.cat]
triton_poi_fused_cat_11.run(buf244, buf248, buf235, buf249, 48, grid=grid(48), stream=stream0)
buf250 = reinterpret_tensor(buf246, (4, 4, 1), (4, 1, 16), 0); del buf246 # reuse
# Topologically Sorted Source Nodes: [matmul_35], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf234, buf250, 16, grid=grid(16), stream=stream0)
buf251 = reinterpret_tensor(buf245, (4, 1, 4), (4, 16, 1), 0); del buf245 # reuse
# Topologically Sorted Source Nodes: [matmul_35], Original ATen: [aten.bmm]
triton_poi_fused_bmm_13.run(buf235, buf251, 16, grid=grid(16), stream=stream0)
buf252 = buf248; del buf248 # reuse
# Topologically Sorted Source Nodes: [matmul_35], Original ATen: [aten.bmm]
extern_kernels.bmm(buf250, buf251, out=buf252)
buf253 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_35], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf252, buf253, 64, grid=grid(64), stream=stream0)
buf254 = reinterpret_tensor(buf256, (4, 4, 4), (32, 8, 1), 4) # alias
# Topologically Sorted Source Nodes: [tgt_update_19], Original ATen: [aten.cat]
triton_poi_fused_cat_14.run(buf249, buf253, buf235, buf254, 64, grid=grid(64), stream=stream0)
buf257 = reinterpret_tensor(buf253, (16, 4), (4, 1), 0); del buf253 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf256, (16, 8), (8, 1), 0), reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), out=buf257)
buf258 = reinterpret_tensor(buf257, (4, 4, 4), (16, 4, 1), 0); del buf257 # reuse
buf292 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_53, add_29], Original ATen: [aten.relu, aten.add]
triton_poi_fused_add_relu_15.run(buf258, primals_12, buf232, buf292, 64, grid=grid(64), stream=stream0)
buf259 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf258, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 8), (1, 4), 0), out=buf259)
buf260 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf230, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf260)
buf261 = reinterpret_tensor(buf260, (4, 4, 4), (16, 4, 1), 0); del buf260 # reuse
buf477 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_57], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf261, primals_16, buf477, 64, grid=grid(64), stream=stream0)
buf262 = reinterpret_tensor(buf259, (4, 4, 8), (32, 8, 1), 0); del buf259 # reuse
buf478 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_55], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf262, primals_14, buf478, 128, grid=grid(128), stream=stream0)
buf263 = reinterpret_tensor(buf251, (4, 4, 1), (4, 1, 16), 0); del buf251 # reuse
# Topologically Sorted Source Nodes: [matmul_36], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf261, buf263, 16, grid=grid(16), stream=stream0)
buf264 = reinterpret_tensor(buf250, (4, 1, 4), (4, 16, 1), 0); del buf250 # reuse
# Topologically Sorted Source Nodes: [matmul_36], Original ATen: [aten.bmm]
triton_poi_fused_bmm_4.run(buf262, buf264, 16, grid=grid(16), stream=stream0)
buf265 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_36], Original ATen: [aten.bmm]
extern_kernels.bmm(buf263, buf264, out=buf265)
buf266 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_36], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf265, buf266, 64, grid=grid(64), stream=stream0)
buf267 = reinterpret_tensor(buf264, (4, 4, 1), (4, 1, 16), 0); del buf264 # reuse
# Topologically Sorted Source Nodes: [matmul_37], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf261, buf267, 16, grid=grid(16), stream=stream0)
buf268 = reinterpret_tensor(buf263, (4, 1, 4), (4, 16, 1), 0); del buf263 # reuse
# Topologically Sorted Source Nodes: [matmul_37], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf262, buf268, 16, grid=grid(16), stream=stream0)
buf269 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_37], Original ATen: [aten.bmm]
extern_kernels.bmm(buf267, buf268, out=buf269)
buf270 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_37], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf269, buf270, 64, grid=grid(64), stream=stream0)
buf271 = buf244; del buf244 # reuse
# Topologically Sorted Source Nodes: [tgt_update_21], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf266, buf262, buf270, buf271, 32, grid=grid(32), stream=stream0)
buf272 = reinterpret_tensor(buf268, (4, 4, 1), (4, 1, 16), 0); del buf268 # reuse
# Topologically Sorted Source Nodes: [matmul_38], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf261, buf272, 16, grid=grid(16), stream=stream0)
buf273 = reinterpret_tensor(buf267, (4, 1, 4), (4, 16, 1), 0); del buf267 # reuse
# Topologically Sorted Source Nodes: [matmul_38], Original ATen: [aten.bmm]
triton_poi_fused_bmm_10.run(buf262, buf273, 16, grid=grid(16), stream=stream0)
buf274 = buf270; del buf270 # reuse
# Topologically Sorted Source Nodes: [matmul_38], Original ATen: [aten.bmm]
extern_kernels.bmm(buf272, buf273, out=buf274)
buf275 = buf266; del buf266 # reuse
# Topologically Sorted Source Nodes: [softmax_38], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf274, buf275, 64, grid=grid(64), stream=stream0)
buf276 = buf249; del buf249 # reuse
# Topologically Sorted Source Nodes: [tgt_update_22], Original ATen: [aten.cat]
triton_poi_fused_cat_11.run(buf271, buf275, buf262, buf276, 48, grid=grid(48), stream=stream0)
buf277 = reinterpret_tensor(buf273, (4, 4, 1), (4, 1, 16), 0); del buf273 # reuse
# Topologically Sorted Source Nodes: [matmul_39], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf261, buf277, 16, grid=grid(16), stream=stream0)
buf278 = reinterpret_tensor(buf272, (4, 1, 4), (4, 16, 1), 0); del buf272 # reuse
# Topologically Sorted Source Nodes: [matmul_39], Original ATen: [aten.bmm]
triton_poi_fused_bmm_13.run(buf262, buf278, 16, grid=grid(16), stream=stream0)
buf279 = buf275; del buf275 # reuse
# Topologically Sorted Source Nodes: [matmul_39], Original ATen: [aten.bmm]
extern_kernels.bmm(buf277, buf278, out=buf279)
buf280 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_39], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf279, buf280, 64, grid=grid(64), stream=stream0)
buf281 = reinterpret_tensor(buf283, (4, 4, 4), (32, 8, 1), 4) # alias
# Topologically Sorted Source Nodes: [tgt_update_23], Original ATen: [aten.cat]
triton_poi_fused_cat_14.run(buf276, buf280, buf262, buf281, 64, grid=grid(64), stream=stream0)
buf284 = reinterpret_tensor(buf280, (16, 4), (4, 1), 0); del buf280 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf283, (16, 8), (8, 1), 0), reinterpret_tensor(primals_17, (8, 4), (1, 8), 0), out=buf284)
buf285 = reinterpret_tensor(buf278, (4, 4), (4, 1), 0); del buf278 # reuse
buf286 = buf285; del buf285 # reuse
# Topologically Sorted Source Nodes: [x_59, add_28, sum_45, v_mean_2], Original ATen: [aten.relu, aten.add, aten.sum, aten.div]
triton_poi_fused_add_div_relu_sum_16.run(buf286, buf284, primals_18, buf230, 16, grid=grid(16), stream=stream0)
buf287 = reinterpret_tensor(buf277, (4, 4), (4, 1), 0); del buf277 # reuse
# Topologically Sorted Source Nodes: [add_29, sum_46, q_mean_2], Original ATen: [aten.add, aten.sum, aten.div]
triton_poi_fused_add_div_sum_17.run(buf258, buf232, buf287, 16, grid=grid(16), stream=stream0)
buf288 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_60], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_20, buf286, reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf288)
buf289 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_61], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_22, buf287, reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf289)
buf290 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_59, add_28], Original ATen: [aten.relu, aten.add]
triton_poi_fused_add_relu_18.run(buf284, primals_18, buf230, buf290, 64, grid=grid(64), stream=stream0)
buf291 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf290, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf291)
buf293 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf292, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 12), (1, 4), 0), out=buf293)
buf294 = reinterpret_tensor(buf291, (4, 4, 12), (48, 12, 1), 0); del buf291 # reuse
buf475 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_63], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_19.run(buf294, primals_24, buf475, 192, grid=grid(192), stream=stream0)
buf295 = reinterpret_tensor(buf293, (4, 4, 12), (48, 12, 1), 0); del buf293 # reuse
buf474 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_65], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_19.run(buf295, primals_26, buf474, 192, grid=grid(192), stream=stream0)
buf296 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf297 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf308 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_30, gated_v_query_2, gated_v_key_2, gated_v_val_2], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_20.run(buf289, buf294, buf296, buf297, buf308, 64, grid=grid(64), stream=stream0)
buf298 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [matmul_40], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf296, buf298, 16, grid=grid(16), stream=stream0)
buf299 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_40], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf297, buf299, 16, grid=grid(16), stream=stream0)
buf300 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_40], Original ATen: [aten.bmm]
extern_kernels.bmm(buf298, buf299, out=buf300)
buf301 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf302 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf309 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_33, gated_q_query_2, gated_q_key_2, gated_q_val_2], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_20.run(buf288, buf295, buf301, buf302, buf309, 64, grid=grid(64), stream=stream0)
buf303 = reinterpret_tensor(buf299, (4, 4, 1), (4, 1, 16), 0); del buf299 # reuse
# Topologically Sorted Source Nodes: [matmul_41], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf301, buf303, 16, grid=grid(16), stream=stream0)
buf304 = reinterpret_tensor(buf298, (4, 1, 4), (4, 16, 1), 0); del buf298 # reuse
# Topologically Sorted Source Nodes: [matmul_41], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf302, buf304, 16, grid=grid(16), stream=stream0)
buf305 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_41], Original ATen: [aten.bmm]
extern_kernels.bmm(buf303, buf304, out=buf305)
buf306 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_40], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf300, buf306, 64, grid=grid(64), stream=stream0)
buf307 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_41], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf305, buf307, 64, grid=grid(64), stream=stream0)
buf310 = reinterpret_tensor(buf304, (4, 4, 1), (4, 1, 16), 0); del buf304 # reuse
# Topologically Sorted Source Nodes: [matmul_42], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf296, buf310, 16, grid=grid(16), stream=stream0)
buf311 = reinterpret_tensor(buf303, (4, 1, 4), (4, 16, 1), 0); del buf303 # reuse
# Topologically Sorted Source Nodes: [matmul_42], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf297, buf311, 16, grid=grid(16), stream=stream0)
buf312 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_42], Original ATen: [aten.bmm]
extern_kernels.bmm(buf310, buf311, out=buf312)
buf313 = reinterpret_tensor(buf311, (4, 4, 1), (4, 1, 16), 0); del buf311 # reuse
# Topologically Sorted Source Nodes: [matmul_43], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf301, buf313, 16, grid=grid(16), stream=stream0)
buf314 = reinterpret_tensor(buf310, (4, 1, 4), (4, 16, 1), 0); del buf310 # reuse
# Topologically Sorted Source Nodes: [matmul_43], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf302, buf314, 16, grid=grid(16), stream=stream0)
buf315 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_43], Original ATen: [aten.bmm]
extern_kernels.bmm(buf313, buf314, out=buf315)
buf316 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_42], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf312, buf316, 64, grid=grid(64), stream=stream0)
buf317 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_43], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf315, buf317, 64, grid=grid(64), stream=stream0)
buf318 = buf271; del buf271 # reuse
# Topologically Sorted Source Nodes: [v_update_9], Original ATen: [aten.cat]
triton_poi_fused_cat_21.run(buf306, buf308, buf316, buf318, 32, grid=grid(32), stream=stream0)
buf319 = buf204; del buf204 # reuse
# Topologically Sorted Source Nodes: [q_update_9], Original ATen: [aten.cat]
triton_poi_fused_cat_21.run(buf307, buf309, buf317, buf319, 32, grid=grid(32), stream=stream0)
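        # The cat_21 kernels stitch the per-head attention outputs back
        # together along the channel dimension, matching the incremental
        # torch.cat((v_update, ...), dim=2) in the intra-modality head loop.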
buf320 = reinterpret_tensor(buf314, (4, 4, 1), (4, 1, 16), 0); del buf314 # reuse
# Topologically Sorted Source Nodes: [matmul_44], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf296, buf320, 16, grid=grid(16), stream=stream0)
buf321 = reinterpret_tensor(buf313, (4, 1, 4), (4, 16, 1), 0); del buf313 # reuse
# Topologically Sorted Source Nodes: [matmul_44], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf297, buf321, 16, grid=grid(16), stream=stream0)
buf322 = buf317; del buf317 # reuse
# Topologically Sorted Source Nodes: [matmul_44], Original ATen: [aten.bmm]
extern_kernels.bmm(buf320, buf321, out=buf322)
buf323 = reinterpret_tensor(buf321, (4, 4, 1), (4, 1, 16), 0); del buf321 # reuse
# Topologically Sorted Source Nodes: [matmul_45], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf301, buf323, 16, grid=grid(16), stream=stream0)
buf324 = reinterpret_tensor(buf320, (4, 1, 4), (4, 16, 1), 0); del buf320 # reuse
# Topologically Sorted Source Nodes: [matmul_45], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf302, buf324, 16, grid=grid(16), stream=stream0)
buf325 = buf307; del buf307 # reuse
# Topologically Sorted Source Nodes: [matmul_45], Original ATen: [aten.bmm]
extern_kernels.bmm(buf323, buf324, out=buf325)
buf326 = buf316; del buf316 # reuse
# Topologically Sorted Source Nodes: [softmax_44], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf322, buf326, 64, grid=grid(64), stream=stream0)
buf327 = buf306; del buf306 # reuse
# Topologically Sorted Source Nodes: [softmax_45], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf325, buf327, 64, grid=grid(64), stream=stream0)
buf328 = buf276; del buf276 # reuse
# Topologically Sorted Source Nodes: [v_update_10], Original ATen: [aten.cat]
triton_poi_fused_cat_22.run(buf318, buf326, buf308, buf328, 48, grid=grid(48), stream=stream0)
buf329 = buf214; del buf214 # reuse
# Topologically Sorted Source Nodes: [q_update_10], Original ATen: [aten.cat]
triton_poi_fused_cat_22.run(buf319, buf327, buf309, buf329, 48, grid=grid(48), stream=stream0)
buf330 = reinterpret_tensor(buf324, (4, 4, 1), (4, 1, 16), 0); del buf324 # reuse
# Topologically Sorted Source Nodes: [matmul_46], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf296, buf330, 16, grid=grid(16), stream=stream0)
buf331 = reinterpret_tensor(buf323, (4, 1, 4), (4, 16, 1), 0); del buf323 # reuse
# Topologically Sorted Source Nodes: [matmul_46], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf297, buf331, 16, grid=grid(16), stream=stream0)
buf332 = buf327; del buf327 # reuse
# Topologically Sorted Source Nodes: [matmul_46], Original ATen: [aten.bmm]
extern_kernels.bmm(buf330, buf331, out=buf332)
buf333 = reinterpret_tensor(buf331, (4, 4, 1), (4, 1, 16), 0); del buf331 # reuse
# Topologically Sorted Source Nodes: [matmul_47], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf301, buf333, 16, grid=grid(16), stream=stream0)
buf334 = reinterpret_tensor(buf330, (4, 1, 4), (4, 16, 1), 0); del buf330 # reuse
# Topologically Sorted Source Nodes: [matmul_47], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf302, buf334, 16, grid=grid(16), stream=stream0)
buf335 = buf326; del buf326 # reuse
# Topologically Sorted Source Nodes: [matmul_47], Original ATen: [aten.bmm]
extern_kernels.bmm(buf333, buf334, out=buf335)
buf336 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_46], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf332, buf336, 64, grid=grid(64), stream=stream0)
buf337 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_47], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf335, buf337, 64, grid=grid(64), stream=stream0)
buf338 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf340 = buf338; del buf338 # reuse
# Topologically Sorted Source Nodes: [v_update_11, add_36], Original ATen: [aten.cat, aten.add]
triton_poi_fused_add_cat_23.run(buf340, buf328, buf336, buf308, buf290, 64, grid=grid(64), stream=stream0)
buf339 = buf336; del buf336 # reuse
buf342 = buf339; del buf339 # reuse
# Topologically Sorted Source Nodes: [q_update_11, add_37], Original ATen: [aten.cat, aten.add]
triton_poi_fused_add_cat_23.run(buf342, buf329, buf337, buf309, buf292, 64, grid=grid(64), stream=stream0)
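        # add_cat_23 fuses the final head concatenation with the residual adds
        # v + v_update and q + q_update that feed v_output / q_output.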
buf341 = reinterpret_tensor(buf337, (16, 4), (4, 1), 0); del buf337 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf340, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf341)
buf343 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf342, (16, 4), (4, 1), 0), reinterpret_tensor(primals_29, (4, 4), (1, 4), 0), out=buf343)
buf344 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf397 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf396 = reinterpret_tensor(buf397, (4, 4, 4), (32, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_59, x_67, add_38, add_39, cat_tgt_7], Original ATen: [aten.relu, aten.add, aten.cat]
triton_poi_fused_add_cat_relu_24.run(buf341, primals_28, buf284, primals_18, buf230, buf344, buf396, 64, grid=grid(64), stream=stream0)
buf345 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf344, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 8), (1, 4), 0), out=buf345)
buf346 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf370 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf369 = reinterpret_tensor(buf370, (4, 4, 4), (32, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_69, add_40, add_41, cat_tgt_6], Original ATen: [aten.relu, aten.add, aten.cat]
triton_poi_fused_add_cat_relu_25.run(buf343, primals_30, buf258, buf232, buf346, buf369, 64, grid=grid(64), stream=stream0)
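        # The add_cat_relu kernels assemble cat_tgt = torch.cat((tgt,
        # tgt_update), dim=2) from OneSideInterModalityUpdate, fused with the
        # preceding ReLU and the residual adds between blocks.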
buf347 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf346, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf347)
buf348 = reinterpret_tensor(buf347, (4, 4, 4), (16, 4, 1), 0); del buf347 # reuse
buf470 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_73], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf348, primals_10, buf470, 64, grid=grid(64), stream=stream0)
del primals_10
buf349 = reinterpret_tensor(buf345, (4, 4, 8), (32, 8, 1), 0); del buf345 # reuse
buf471 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_71], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf349, primals_8, buf471, 128, grid=grid(128), stream=stream0)
del primals_8
buf350 = reinterpret_tensor(buf334, (4, 4, 1), (4, 1, 16), 0); del buf334 # reuse
# Topologically Sorted Source Nodes: [matmul_48], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf348, buf350, 16, grid=grid(16), stream=stream0)
buf351 = reinterpret_tensor(buf333, (4, 1, 4), (4, 16, 1), 0); del buf333 # reuse
# Topologically Sorted Source Nodes: [matmul_48], Original ATen: [aten.bmm]
triton_poi_fused_bmm_4.run(buf349, buf351, 16, grid=grid(16), stream=stream0)
buf352 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_48], Original ATen: [aten.bmm]
extern_kernels.bmm(buf350, buf351, out=buf352)
buf353 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_48], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf352, buf353, 64, grid=grid(64), stream=stream0)
buf354 = reinterpret_tensor(buf351, (4, 4, 1), (4, 1, 16), 0); del buf351 # reuse
# Topologically Sorted Source Nodes: [matmul_49], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf348, buf354, 16, grid=grid(16), stream=stream0)
buf355 = reinterpret_tensor(buf350, (4, 1, 4), (4, 16, 1), 0); del buf350 # reuse
# Topologically Sorted Source Nodes: [matmul_49], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf349, buf355, 16, grid=grid(16), stream=stream0)
buf356 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_49], Original ATen: [aten.bmm]
extern_kernels.bmm(buf354, buf355, out=buf356)
buf357 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_49], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf356, buf357, 64, grid=grid(64), stream=stream0)
buf358 = buf319; del buf319 # reuse
# Topologically Sorted Source Nodes: [tgt_update_25], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf353, buf349, buf357, buf358, 32, grid=grid(32), stream=stream0)
buf359 = reinterpret_tensor(buf355, (4, 4, 1), (4, 1, 16), 0); del buf355 # reuse
# Topologically Sorted Source Nodes: [matmul_50], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf348, buf359, 16, grid=grid(16), stream=stream0)
buf360 = reinterpret_tensor(buf354, (4, 1, 4), (4, 16, 1), 0); del buf354 # reuse
# Topologically Sorted Source Nodes: [matmul_50], Original ATen: [aten.bmm]
triton_poi_fused_bmm_10.run(buf349, buf360, 16, grid=grid(16), stream=stream0)
buf361 = buf357; del buf357 # reuse
# Topologically Sorted Source Nodes: [matmul_50], Original ATen: [aten.bmm]
extern_kernels.bmm(buf359, buf360, out=buf361)
buf362 = buf353; del buf353 # reuse
# Topologically Sorted Source Nodes: [softmax_50], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf361, buf362, 64, grid=grid(64), stream=stream0)
buf363 = buf329; del buf329 # reuse
# Topologically Sorted Source Nodes: [tgt_update_26], Original ATen: [aten.cat]
triton_poi_fused_cat_11.run(buf358, buf362, buf349, buf363, 48, grid=grid(48), stream=stream0)
buf364 = reinterpret_tensor(buf360, (4, 4, 1), (4, 1, 16), 0); del buf360 # reuse
# Topologically Sorted Source Nodes: [matmul_51], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf348, buf364, 16, grid=grid(16), stream=stream0)
buf365 = reinterpret_tensor(buf359, (4, 1, 4), (4, 16, 1), 0); del buf359 # reuse
# Topologically Sorted Source Nodes: [matmul_51], Original ATen: [aten.bmm]
triton_poi_fused_bmm_13.run(buf349, buf365, 16, grid=grid(16), stream=stream0)
buf366 = buf362; del buf362 # reuse
# Topologically Sorted Source Nodes: [matmul_51], Original ATen: [aten.bmm]
extern_kernels.bmm(buf364, buf365, out=buf366)
buf367 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_51], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf366, buf367, 64, grid=grid(64), stream=stream0)
buf368 = reinterpret_tensor(buf370, (4, 4, 4), (32, 8, 1), 4) # alias
# Topologically Sorted Source Nodes: [tgt_update_27], Original ATen: [aten.cat]
triton_poi_fused_cat_14.run(buf363, buf367, buf349, buf368, 64, grid=grid(64), stream=stream0)
buf371 = reinterpret_tensor(buf367, (16, 4), (4, 1), 0); del buf367 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf370, (16, 8), (8, 1), 0), reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), out=buf371)
buf372 = reinterpret_tensor(buf371, (4, 4, 4), (16, 4, 1), 0); del buf371 # reuse
buf406 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_75, add_43], Original ATen: [aten.relu, aten.add]
triton_poi_fused_add_relu_15.run(buf372, primals_12, buf346, buf406, 64, grid=grid(64), stream=stream0)
del primals_12
buf373 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf372, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 8), (1, 4), 0), out=buf373)
buf374 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf344, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf374)
buf375 = reinterpret_tensor(buf374, (4, 4, 4), (16, 4, 1), 0); del buf374 # reuse
buf467 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_79], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf375, primals_16, buf467, 64, grid=grid(64), stream=stream0)
del primals_16
buf376 = reinterpret_tensor(buf373, (4, 4, 8), (32, 8, 1), 0); del buf373 # reuse
buf468 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_77], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf376, primals_14, buf468, 128, grid=grid(128), stream=stream0)
del primals_14
buf377 = reinterpret_tensor(buf365, (4, 4, 1), (4, 1, 16), 0); del buf365 # reuse
# Topologically Sorted Source Nodes: [matmul_52], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf375, buf377, 16, grid=grid(16), stream=stream0)
buf378 = reinterpret_tensor(buf364, (4, 1, 4), (4, 16, 1), 0); del buf364 # reuse
# Topologically Sorted Source Nodes: [matmul_52], Original ATen: [aten.bmm]
triton_poi_fused_bmm_4.run(buf376, buf378, 16, grid=grid(16), stream=stream0)
buf379 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_52], Original ATen: [aten.bmm]
extern_kernels.bmm(buf377, buf378, out=buf379)
buf380 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_52], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf379, buf380, 64, grid=grid(64), stream=stream0)
buf381 = reinterpret_tensor(buf378, (4, 4, 1), (4, 1, 16), 0); del buf378 # reuse
# Topologically Sorted Source Nodes: [matmul_53], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf375, buf381, 16, grid=grid(16), stream=stream0)
buf382 = reinterpret_tensor(buf377, (4, 1, 4), (4, 16, 1), 0); del buf377 # reuse
# Topologically Sorted Source Nodes: [matmul_53], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf376, buf382, 16, grid=grid(16), stream=stream0)
buf383 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_53], Original ATen: [aten.bmm]
extern_kernels.bmm(buf381, buf382, out=buf383)
buf384 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_53], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf383, buf384, 64, grid=grid(64), stream=stream0)
buf385 = buf358; del buf358 # reuse
# Topologically Sorted Source Nodes: [tgt_update_29], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf380, buf376, buf384, buf385, 32, grid=grid(32), stream=stream0)
buf386 = reinterpret_tensor(buf382, (4, 4, 1), (4, 1, 16), 0); del buf382 # reuse
# Topologically Sorted Source Nodes: [matmul_54], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf375, buf386, 16, grid=grid(16), stream=stream0)
buf387 = reinterpret_tensor(buf381, (4, 1, 4), (4, 16, 1), 0); del buf381 # reuse
# Topologically Sorted Source Nodes: [matmul_54], Original ATen: [aten.bmm]
triton_poi_fused_bmm_10.run(buf376, buf387, 16, grid=grid(16), stream=stream0)
buf388 = buf384; del buf384 # reuse
# Topologically Sorted Source Nodes: [matmul_54], Original ATen: [aten.bmm]
extern_kernels.bmm(buf386, buf387, out=buf388)
buf389 = buf380; del buf380 # reuse
# Topologically Sorted Source Nodes: [softmax_54], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf388, buf389, 64, grid=grid(64), stream=stream0)
buf390 = buf363; del buf363 # reuse
# Topologically Sorted Source Nodes: [tgt_update_30], Original ATen: [aten.cat]
triton_poi_fused_cat_11.run(buf385, buf389, buf376, buf390, 48, grid=grid(48), stream=stream0)
buf391 = reinterpret_tensor(buf387, (4, 4, 1), (4, 1, 16), 0); del buf387 # reuse
# Topologically Sorted Source Nodes: [matmul_55], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf375, buf391, 16, grid=grid(16), stream=stream0)
buf392 = reinterpret_tensor(buf386, (4, 1, 4), (4, 16, 1), 0); del buf386 # reuse
# Topologically Sorted Source Nodes: [matmul_55], Original ATen: [aten.bmm]
triton_poi_fused_bmm_13.run(buf376, buf392, 16, grid=grid(16), stream=stream0)
buf393 = buf389; del buf389 # reuse
# Topologically Sorted Source Nodes: [matmul_55], Original ATen: [aten.bmm]
extern_kernels.bmm(buf391, buf392, out=buf393)
buf394 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_55], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf393, buf394, 64, grid=grid(64), stream=stream0)
buf395 = reinterpret_tensor(buf397, (4, 4, 4), (32, 8, 1), 4) # alias
# Topologically Sorted Source Nodes: [tgt_update_31], Original ATen: [aten.cat]
triton_poi_fused_cat_14.run(buf390, buf394, buf376, buf395, 64, grid=grid(64), stream=stream0)
buf398 = reinterpret_tensor(buf394, (16, 4), (4, 1), 0); del buf394 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf397, (16, 8), (8, 1), 0), reinterpret_tensor(primals_17, (8, 4), (1, 8), 0), out=buf398)
buf399 = reinterpret_tensor(buf392, (4, 4), (4, 1), 0); del buf392 # reuse
buf400 = buf399; del buf399 # reuse
# Topologically Sorted Source Nodes: [x_81, add_42, sum_63, v_mean_3], Original ATen: [aten.relu, aten.add, aten.sum, aten.div]
triton_poi_fused_add_div_relu_sum_16.run(buf400, buf398, primals_18, buf344, 16, grid=grid(16), stream=stream0)
buf401 = reinterpret_tensor(buf391, (4, 4), (4, 1), 0); del buf391 # reuse
# Topologically Sorted Source Nodes: [add_43, sum_64, q_mean_3], Original ATen: [aten.add, aten.sum, aten.div]
triton_poi_fused_add_div_sum_17.run(buf372, buf346, buf401, 16, grid=grid(16), stream=stream0)
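        # The fused sum/div kernels implement the mean pooling
        # v_mean = v.sum(1) / num_obj and q_mean = q.sum(1) / max_len that
        # drive the sigmoid gates of DyIntraModalityUpdate.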
buf402 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_82], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_20, buf400, reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf402)
del primals_20
buf403 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_83], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_22, buf401, reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf403)
del primals_22
buf404 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_81, add_42], Original ATen: [aten.relu, aten.add]
triton_poi_fused_add_relu_18.run(buf398, primals_18, buf344, buf404, 64, grid=grid(64), stream=stream0)
buf405 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf404, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf405)
buf407 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf406, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 12), (1, 4), 0), out=buf407)
buf408 = reinterpret_tensor(buf405, (4, 4, 12), (48, 12, 1), 0); del buf405 # reuse
buf465 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_85], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_19.run(buf408, primals_24, buf465, 192, grid=grid(192), stream=stream0)
del primals_24
buf409 = reinterpret_tensor(buf407, (4, 4, 12), (48, 12, 1), 0); del buf407 # reuse
buf464 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_87], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_19.run(buf409, primals_26, buf464, 192, grid=grid(192), stream=stream0)
del primals_26
buf410 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf411 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf422 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_44, gated_v_query_3, gated_v_key_3, gated_v_val_3], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_20.run(buf403, buf408, buf410, buf411, buf422, 64, grid=grid(64), stream=stream0)
buf412 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [matmul_56], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf410, buf412, 16, grid=grid(16), stream=stream0)
buf413 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_56], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf411, buf413, 16, grid=grid(16), stream=stream0)
buf414 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_56], Original ATen: [aten.bmm]
extern_kernels.bmm(buf412, buf413, out=buf414)
buf415 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf416 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf423 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_47, gated_q_query_3, gated_q_key_3, gated_q_val_3], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_20.run(buf402, buf409, buf415, buf416, buf423, 64, grid=grid(64), stream=stream0)
buf417 = reinterpret_tensor(buf413, (4, 4, 1), (4, 1, 16), 0); del buf413 # reuse
# Topologically Sorted Source Nodes: [matmul_57], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf415, buf417, 16, grid=grid(16), stream=stream0)
buf418 = reinterpret_tensor(buf412, (4, 1, 4), (4, 16, 1), 0); del buf412 # reuse
# Topologically Sorted Source Nodes: [matmul_57], Original ATen: [aten.bmm]
triton_poi_fused_bmm_3.run(buf416, buf418, 16, grid=grid(16), stream=stream0)
buf419 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_57], Original ATen: [aten.bmm]
extern_kernels.bmm(buf417, buf418, out=buf419)
buf420 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_56], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf414, buf420, 64, grid=grid(64), stream=stream0)
buf421 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_57], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf419, buf421, 64, grid=grid(64), stream=stream0)
buf424 = reinterpret_tensor(buf418, (4, 4, 1), (4, 1, 16), 0); del buf418 # reuse
# Topologically Sorted Source Nodes: [matmul_58], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf410, buf424, 16, grid=grid(16), stream=stream0)
buf425 = reinterpret_tensor(buf417, (4, 1, 4), (4, 16, 1), 0); del buf417 # reuse
# Topologically Sorted Source Nodes: [matmul_58], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf411, buf425, 16, grid=grid(16), stream=stream0)
buf426 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_58], Original ATen: [aten.bmm]
extern_kernels.bmm(buf424, buf425, out=buf426)
buf427 = reinterpret_tensor(buf425, (4, 4, 1), (4, 1, 16), 0); del buf425 # reuse
# Topologically Sorted Source Nodes: [matmul_59], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf415, buf427, 16, grid=grid(16), stream=stream0)
buf428 = reinterpret_tensor(buf424, (4, 1, 4), (4, 16, 1), 0); del buf424 # reuse
# Topologically Sorted Source Nodes: [matmul_59], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf416, buf428, 16, grid=grid(16), stream=stream0)
buf429 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_59], Original ATen: [aten.bmm]
extern_kernels.bmm(buf427, buf428, out=buf429)
buf430 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_58], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf426, buf430, 64, grid=grid(64), stream=stream0)
buf431 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_59], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf429, buf431, 64, grid=grid(64), stream=stream0)
buf432 = buf385; del buf385 # reuse
# Topologically Sorted Source Nodes: [v_update_13], Original ATen: [aten.cat]
triton_poi_fused_cat_21.run(buf420, buf422, buf430, buf432, 32, grid=grid(32), stream=stream0)
buf433 = buf318; del buf318 # reuse
# Topologically Sorted Source Nodes: [q_update_13], Original ATen: [aten.cat]
triton_poi_fused_cat_21.run(buf421, buf423, buf431, buf433, 32, grid=grid(32), stream=stream0)
buf434 = reinterpret_tensor(buf428, (4, 4, 1), (4, 1, 16), 0); del buf428 # reuse
# Topologically Sorted Source Nodes: [matmul_60], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf410, buf434, 16, grid=grid(16), stream=stream0)
buf435 = reinterpret_tensor(buf427, (4, 1, 4), (4, 16, 1), 0); del buf427 # reuse
# Topologically Sorted Source Nodes: [matmul_60], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf411, buf435, 16, grid=grid(16), stream=stream0)
buf436 = buf431; del buf431 # reuse
# Topologically Sorted Source Nodes: [matmul_60], Original ATen: [aten.bmm]
extern_kernels.bmm(buf434, buf435, out=buf436)
buf437 = reinterpret_tensor(buf435, (4, 4, 1), (4, 1, 16), 0); del buf435 # reuse
# Topologically Sorted Source Nodes: [matmul_61], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf415, buf437, 16, grid=grid(16), stream=stream0)
buf438 = reinterpret_tensor(buf434, (4, 1, 4), (4, 16, 1), 0); del buf434 # reuse
# Topologically Sorted Source Nodes: [matmul_61], Original ATen: [aten.bmm]
triton_poi_fused_bmm_9.run(buf416, buf438, 16, grid=grid(16), stream=stream0)
buf439 = buf421; del buf421 # reuse
# Topologically Sorted Source Nodes: [matmul_61], Original ATen: [aten.bmm]
extern_kernels.bmm(buf437, buf438, out=buf439)
buf440 = buf430; del buf430 # reuse
# Topologically Sorted Source Nodes: [softmax_60], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf436, buf440, 64, grid=grid(64), stream=stream0)
buf441 = buf420; del buf420 # reuse
# Topologically Sorted Source Nodes: [softmax_61], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf439, buf441, 64, grid=grid(64), stream=stream0)
buf442 = buf390; del buf390 # reuse
# Topologically Sorted Source Nodes: [v_update_14], Original ATen: [aten.cat]
triton_poi_fused_cat_22.run(buf432, buf440, buf422, buf442, 48, grid=grid(48), stream=stream0)
del buf432
buf443 = buf328; del buf328 # reuse
# Topologically Sorted Source Nodes: [q_update_14], Original ATen: [aten.cat]
triton_poi_fused_cat_22.run(buf433, buf441, buf423, buf443, 48, grid=grid(48), stream=stream0)
del buf433
buf444 = reinterpret_tensor(buf438, (4, 4, 1), (4, 1, 16), 0); del buf438 # reuse
# Topologically Sorted Source Nodes: [matmul_62], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf410, buf444, 16, grid=grid(16), stream=stream0)
buf445 = reinterpret_tensor(buf437, (4, 1, 4), (4, 16, 1), 0); del buf437 # reuse
# Topologically Sorted Source Nodes: [matmul_62], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf411, buf445, 16, grid=grid(16), stream=stream0)
buf446 = buf441; del buf441 # reuse
# Topologically Sorted Source Nodes: [matmul_62], Original ATen: [aten.bmm]
extern_kernels.bmm(buf444, buf445, out=buf446)
buf447 = reinterpret_tensor(buf445, (4, 4, 1), (4, 1, 16), 0); del buf445 # reuse
# Topologically Sorted Source Nodes: [matmul_63], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf415, buf447, 16, grid=grid(16), stream=stream0)
buf448 = reinterpret_tensor(buf444, (4, 1, 4), (4, 16, 1), 0); del buf444 # reuse
# Topologically Sorted Source Nodes: [matmul_63], Original ATen: [aten.bmm]
triton_poi_fused_bmm_12.run(buf416, buf448, 16, grid=grid(16), stream=stream0)
buf449 = buf440; del buf440 # reuse
# Topologically Sorted Source Nodes: [matmul_63], Original ATen: [aten.bmm]
extern_kernels.bmm(buf447, buf448, out=buf449)
del buf447
del buf448
buf450 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_62], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf446, buf450, 64, grid=grid(64), stream=stream0)
buf451 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_63], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf449, buf451, 64, grid=grid(64), stream=stream0)
buf452 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf454 = buf452; del buf452 # reuse
# Topologically Sorted Source Nodes: [v_update_15, add_50], Original ATen: [aten.cat, aten.add]
triton_poi_fused_add_cat_23.run(buf454, buf442, buf450, buf422, buf404, 64, grid=grid(64), stream=stream0)
del buf442
buf453 = buf450; del buf450 # reuse
buf456 = buf453; del buf453 # reuse
# Topologically Sorted Source Nodes: [q_update_15, add_51], Original ATen: [aten.cat, aten.add]
triton_poi_fused_add_cat_23.run(buf456, buf443, buf451, buf423, buf406, 64, grid=grid(64), stream=stream0)
del buf443
buf455 = reinterpret_tensor(buf451, (16, 4), (4, 1), 0); del buf451 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf454, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf455)
buf457 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf456, (16, 4), (4, 1), 0), reinterpret_tensor(primals_29, (4, 4), (1, 4), 0), out=buf457)
buf458 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf459 = buf458; del buf458 # reuse
buf463 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf466 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf473 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf476 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf483 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf486 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf493 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf496 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf503 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_15, x_23, x_37, x_45, x_59, x_67, x_81, x_89, add_56, add_57, add_58, add_59, add_60, add_61, add_62, add_63, add_64], Original ATen: [aten.relu, aten.add, aten.threshold_backward]
triton_poi_fused_add_relu_threshold_backward_26.run(buf459, buf2, buf56, primals_18, buf113, primals_28, buf170, buf227, buf284, buf341, buf398, buf455, buf463, buf466, buf473, buf476, buf483, buf486, buf493, buf496, buf503, 64, grid=grid(64), stream=stream0)
del buf113
del buf170
del buf227
del buf284
del buf341
del buf398
del buf455
del primals_18
del primals_28
buf460 = reinterpret_tensor(buf56, (4, 4, 4), (16, 4, 1), 0); del buf56 # reuse
buf461 = buf460; del buf460 # reuse
buf462 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf472 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf482 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf492 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf469 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf479 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf489 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf499 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf502 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_25, x_47, x_69, x_91, add_65, add_66, add_67, add_68, add_69, add_70, add_71, add_72, add_73], Original ATen: [aten.relu, aten.add, aten.threshold_backward]
triton_poi_fused_add_relu_threshold_backward_27.run(buf461, buf4, buf30, buf115, primals_30, buf144, buf229, buf258, buf343, buf372, buf457, buf462, buf472, buf482, buf492, buf469, buf479, buf489, buf499, buf502, 64, grid=grid(64), stream=stream0)
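        # The two epilogue kernels above fold every residual branch into the
        # final outputs (the sum(result_v) / sum(result_q) of
        # SingleBlock.forward) while recording ReLU masks (threshold_backward)
        # for the backward pass.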
del buf115
del buf229
del buf343
del buf457
del primals_30
return (buf459, buf461, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (16, 4), (4, 1), 0), buf10, reinterpret_tensor(buf7, (4, 1, 4, 1), (32, 32, 8, 1), 4), buf14, reinterpret_tensor(buf7, (4, 1, 4, 1), (32, 32, 8, 1), 5), buf19, reinterpret_tensor(buf7, (4, 1, 4, 1), (32, 32, 8, 1), 6), buf24, reinterpret_tensor(buf7, (4, 1, 4, 1), (32, 32, 8, 1), 7), reinterpret_tensor(buf28, (16, 8), (8, 1), 0), reinterpret_tensor(buf30, (16, 4), (4, 1), 0), buf37, reinterpret_tensor(buf34, (4, 1, 4, 1), (32, 32, 8, 1), 4), buf41, reinterpret_tensor(buf34, (4, 1, 4, 1), (32, 32, 8, 1), 5), buf46, reinterpret_tensor(buf34, (4, 1, 4, 1), (32, 32, 8, 1), 6), buf51, reinterpret_tensor(buf34, (4, 1, 4, 1), (32, 32, 8, 1), 7), reinterpret_tensor(buf55, (16, 8), (8, 1), 0), buf58, buf59, buf60, buf61, reinterpret_tensor(buf62, (16, 4), (4, 1), 0), reinterpret_tensor(buf64, (16, 4), (4, 1), 0), reinterpret_tensor(buf66, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf66, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf66, (4, 4, 4), (48, 12, 1), 8), reinterpret_tensor(buf67, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf67, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf67, (4, 4, 4), (48, 12, 1), 8), buf72, buf77, reinterpret_tensor(buf80, (4, 1, 4, 1), (16, 16, 4, 1), 0), reinterpret_tensor(buf81, (4, 1, 4, 1), (16, 16, 4, 1), 0), buf84, buf87, reinterpret_tensor(buf80, (4, 1, 4, 1), (16, 16, 4, 1), 1), reinterpret_tensor(buf81, (4, 1, 4, 1), (16, 16, 4, 1), 1), buf94, buf97, reinterpret_tensor(buf80, (4, 1, 4, 1), (16, 16, 4, 1), 2), reinterpret_tensor(buf81, (4, 1, 4, 1), (16, 16, 4, 1), 2), buf104, buf107, reinterpret_tensor(buf80, (4, 1, 4, 1), (16, 16, 4, 1), 3), reinterpret_tensor(buf81, (4, 1, 4, 1), (16, 16, 4, 1), 3), reinterpret_tensor(buf112, (16, 4), (4, 1), 0), reinterpret_tensor(buf114, (16, 4), (4, 1), 0), reinterpret_tensor(buf116, (16, 4), (4, 1), 0), reinterpret_tensor(buf118, (16, 4), (4, 1), 0), buf124, reinterpret_tensor(buf121, (4, 1, 4, 1), (32, 32, 8, 1), 4), buf128, reinterpret_tensor(buf121, (4, 1, 4, 1), (32, 32, 8, 1), 5), buf133, reinterpret_tensor(buf121, (4, 1, 4, 1), (32, 32, 8, 1), 6), buf138, reinterpret_tensor(buf121, (4, 1, 4, 1), (32, 32, 8, 1), 7), reinterpret_tensor(buf142, (16, 8), (8, 1), 0), reinterpret_tensor(buf144, (16, 4), (4, 1), 0), buf151, reinterpret_tensor(buf148, (4, 1, 4, 1), (32, 32, 8, 1), 4), buf155, reinterpret_tensor(buf148, (4, 1, 4, 1), (32, 32, 8, 1), 5), buf160, reinterpret_tensor(buf148, (4, 1, 4, 1), (32, 32, 8, 1), 6), buf165, reinterpret_tensor(buf148, (4, 1, 4, 1), (32, 32, 8, 1), 7), reinterpret_tensor(buf169, (16, 8), (8, 1), 0), buf172, buf173, buf174, buf175, reinterpret_tensor(buf176, (16, 4), (4, 1), 0), reinterpret_tensor(buf178, (16, 4), (4, 1), 0), reinterpret_tensor(buf180, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf180, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf180, (4, 4, 4), (48, 12, 1), 8), reinterpret_tensor(buf181, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf181, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf181, (4, 4, 4), (48, 12, 1), 8), buf186, buf191, reinterpret_tensor(buf194, (4, 1, 4, 1), (16, 16, 4, 1), 0), reinterpret_tensor(buf195, (4, 1, 4, 1), (16, 16, 4, 1), 0), buf198, buf201, reinterpret_tensor(buf194, (4, 1, 4, 1), (16, 16, 4, 1), 1), reinterpret_tensor(buf195, (4, 1, 4, 1), (16, 16, 4, 1), 1), buf208, buf211, reinterpret_tensor(buf194, (4, 1, 4, 1), 
(16, 16, 4, 1), 2), reinterpret_tensor(buf195, (4, 1, 4, 1), (16, 16, 4, 1), 2), buf218, buf221, reinterpret_tensor(buf194, (4, 1, 4, 1), (16, 16, 4, 1), 3), reinterpret_tensor(buf195, (4, 1, 4, 1), (16, 16, 4, 1), 3), reinterpret_tensor(buf226, (16, 4), (4, 1), 0), reinterpret_tensor(buf228, (16, 4), (4, 1), 0), reinterpret_tensor(buf230, (16, 4), (4, 1), 0), reinterpret_tensor(buf232, (16, 4), (4, 1), 0), buf238, reinterpret_tensor(buf235, (4, 1, 4, 1), (32, 32, 8, 1), 4), buf242, reinterpret_tensor(buf235, (4, 1, 4, 1), (32, 32, 8, 1), 5), buf247, reinterpret_tensor(buf235, (4, 1, 4, 1), (32, 32, 8, 1), 6), buf252, reinterpret_tensor(buf235, (4, 1, 4, 1), (32, 32, 8, 1), 7), reinterpret_tensor(buf256, (16, 8), (8, 1), 0), reinterpret_tensor(buf258, (16, 4), (4, 1), 0), buf265, reinterpret_tensor(buf262, (4, 1, 4, 1), (32, 32, 8, 1), 4), buf269, reinterpret_tensor(buf262, (4, 1, 4, 1), (32, 32, 8, 1), 5), buf274, reinterpret_tensor(buf262, (4, 1, 4, 1), (32, 32, 8, 1), 6), buf279, reinterpret_tensor(buf262, (4, 1, 4, 1), (32, 32, 8, 1), 7), reinterpret_tensor(buf283, (16, 8), (8, 1), 0), buf286, buf287, buf288, buf289, reinterpret_tensor(buf290, (16, 4), (4, 1), 0), reinterpret_tensor(buf292, (16, 4), (4, 1), 0), reinterpret_tensor(buf294, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf294, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf294, (4, 4, 4), (48, 12, 1), 8), reinterpret_tensor(buf295, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf295, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf295, (4, 4, 4), (48, 12, 1), 8), buf300, buf305, reinterpret_tensor(buf308, (4, 1, 4, 1), (16, 16, 4, 1), 0), reinterpret_tensor(buf309, (4, 1, 4, 1), (16, 16, 4, 1), 0), buf312, buf315, reinterpret_tensor(buf308, (4, 1, 4, 1), (16, 16, 4, 1), 1), reinterpret_tensor(buf309, (4, 1, 4, 1), (16, 16, 4, 1), 1), buf322, buf325, reinterpret_tensor(buf308, (4, 1, 4, 1), (16, 16, 4, 1), 2), reinterpret_tensor(buf309, (4, 1, 4, 1), (16, 16, 4, 1), 2), buf332, buf335, reinterpret_tensor(buf308, (4, 1, 4, 1), (16, 16, 4, 1), 3), reinterpret_tensor(buf309, (4, 1, 4, 1), (16, 16, 4, 1), 3), reinterpret_tensor(buf340, (16, 4), (4, 1), 0), reinterpret_tensor(buf342, (16, 4), (4, 1), 0), reinterpret_tensor(buf344, (16, 4), (4, 1), 0), reinterpret_tensor(buf346, (16, 4), (4, 1), 0), buf352, reinterpret_tensor(buf349, (4, 1, 4, 1), (32, 32, 8, 1), 4), buf356, reinterpret_tensor(buf349, (4, 1, 4, 1), (32, 32, 8, 1), 5), buf361, reinterpret_tensor(buf349, (4, 1, 4, 1), (32, 32, 8, 1), 6), buf366, reinterpret_tensor(buf349, (4, 1, 4, 1), (32, 32, 8, 1), 7), reinterpret_tensor(buf370, (16, 8), (8, 1), 0), reinterpret_tensor(buf372, (16, 4), (4, 1), 0), buf379, reinterpret_tensor(buf376, (4, 1, 4, 1), (32, 32, 8, 1), 4), buf383, reinterpret_tensor(buf376, (4, 1, 4, 1), (32, 32, 8, 1), 5), buf388, reinterpret_tensor(buf376, (4, 1, 4, 1), (32, 32, 8, 1), 6), buf393, reinterpret_tensor(buf376, (4, 1, 4, 1), (32, 32, 8, 1), 7), reinterpret_tensor(buf397, (16, 8), (8, 1), 0), buf400, buf401, buf402, buf403, reinterpret_tensor(buf404, (16, 4), (4, 1), 0), reinterpret_tensor(buf406, (16, 4), (4, 1), 0), reinterpret_tensor(buf408, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf408, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf408, (4, 4, 4), (48, 12, 1), 8), reinterpret_tensor(buf409, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf409, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf409, (4, 4, 4), (48, 12, 1), 8), buf414, buf419, reinterpret_tensor(buf422, (4, 1, 4, 1), (16, 16, 4, 1), 0), 
reinterpret_tensor(buf423, (4, 1, 4, 1), (16, 16, 4, 1), 0), buf426, buf429, reinterpret_tensor(buf422, (4, 1, 4, 1), (16, 16, 4, 1), 1), reinterpret_tensor(buf423, (4, 1, 4, 1), (16, 16, 4, 1), 1), buf436, buf439, reinterpret_tensor(buf422, (4, 1, 4, 1), (16, 16, 4, 1), 2), reinterpret_tensor(buf423, (4, 1, 4, 1), (16, 16, 4, 1), 2), buf446, buf449, reinterpret_tensor(buf422, (4, 1, 4, 1), (16, 16, 4, 1), 3), reinterpret_tensor(buf423, (4, 1, 4, 1), (16, 16, 4, 1), 3), reinterpret_tensor(buf454, (16, 4), (4, 1), 0), reinterpret_tensor(buf456, (16, 4), (4, 1), 0), buf462, primals_29, buf463, primals_27, reinterpret_tensor(buf415, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf416, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf410, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf411, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf415, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf416, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf410, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf411, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf415, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf416, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf410, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf411, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf415, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf416, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf410, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf411, (4, 4, 1), (16, 4, 1), 0), buf464, primals_25, buf465, primals_23, primals_21, primals_19, buf466, primals_17, reinterpret_tensor(buf375, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf376, (4, 4, 1), (32, 8, 1), 3), reinterpret_tensor(buf375, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf376, (4, 4, 1), (32, 8, 1), 2), reinterpret_tensor(buf375, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf376, (4, 4, 1), (32, 8, 1), 1), reinterpret_tensor(buf375, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf376, (4, 4, 1), (32, 8, 1), 0), buf467, primals_15, buf468, primals_13, buf469, primals_11, reinterpret_tensor(buf348, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf349, (4, 4, 1), (32, 8, 1), 3), reinterpret_tensor(buf348, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf349, (4, 4, 1), (32, 8, 1), 2), reinterpret_tensor(buf348, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf349, (4, 4, 1), (32, 8, 1), 1), reinterpret_tensor(buf348, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf349, (4, 4, 1), (32, 8, 1), 0), buf470, primals_9, buf471, primals_7, buf472, buf473, reinterpret_tensor(buf301, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf302, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf296, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf297, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf301, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf302, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf296, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf297, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf301, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf302, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf296, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf297, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf301, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf302, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf296, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf297, (4, 4, 1), (16, 4, 1), 0), buf474, buf475, buf476, reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf262, (4, 4, 1), (32, 8, 1), 3), 
reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf262, (4, 4, 1), (32, 8, 1), 2), reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf262, (4, 4, 1), (32, 8, 1), 1), reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf262, (4, 4, 1), (32, 8, 1), 0), buf477, buf478, buf479, reinterpret_tensor(buf234, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf235, (4, 4, 1), (32, 8, 1), 3), reinterpret_tensor(buf234, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf235, (4, 4, 1), (32, 8, 1), 2), reinterpret_tensor(buf234, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf235, (4, 4, 1), (32, 8, 1), 1), reinterpret_tensor(buf234, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf235, (4, 4, 1), (32, 8, 1), 0), buf480, buf481, buf482, buf483, reinterpret_tensor(buf187, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf188, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf182, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf183, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf187, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf188, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf182, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf183, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf187, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf188, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf182, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf183, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf187, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf188, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf182, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf183, (4, 4, 1), (16, 4, 1), 0), buf484, buf485, buf486, reinterpret_tensor(buf147, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf148, (4, 4, 1), (32, 8, 1), 3), reinterpret_tensor(buf147, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf148, (4, 4, 1), (32, 8, 1), 2), reinterpret_tensor(buf147, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf148, (4, 4, 1), (32, 8, 1), 1), reinterpret_tensor(buf147, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf148, (4, 4, 1), (32, 8, 1), 0), buf487, buf488, buf489, reinterpret_tensor(buf120, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf121, (4, 4, 1), (32, 8, 1), 3), reinterpret_tensor(buf120, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf121, (4, 4, 1), (32, 8, 1), 2), reinterpret_tensor(buf120, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf121, (4, 4, 1), (32, 8, 1), 1), reinterpret_tensor(buf120, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf121, (4, 4, 1), (32, 8, 1), 0), buf490, buf491, buf492, buf493, reinterpret_tensor(buf73, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf74, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf68, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf69, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf73, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf74, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf68, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf69, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf73, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf74, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf68, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf69, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf73, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf74, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf68, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf69, (4, 4, 1), (16, 4, 1), 0), buf494, buf495, buf496, reinterpret_tensor(buf33, (4, 1, 4), (16, 1, 4), 3), 
reinterpret_tensor(buf34, (4, 4, 1), (32, 8, 1), 3), reinterpret_tensor(buf33, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf34, (4, 4, 1), (32, 8, 1), 2), reinterpret_tensor(buf33, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf34, (4, 4, 1), (32, 8, 1), 1), reinterpret_tensor(buf33, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf34, (4, 4, 1), (32, 8, 1), 0), buf497, buf498, buf499, reinterpret_tensor(buf6, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf7, (4, 4, 1), (32, 8, 1), 3), reinterpret_tensor(buf6, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf7, (4, 4, 1), (32, 8, 1), 2), reinterpret_tensor(buf6, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf7, (4, 4, 1), (32, 8, 1), 1), reinterpret_tensor(buf6, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf7, (4, 4, 1), (32, 8, 1), 0), buf500, buf501, buf502, buf503, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
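    # rand_strided allocates CUDA tensors with exactly the shapes and strides
    # the graph was traced with; primals_1..primals_30 follow the parameter
    # and input order that call() expects.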
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
        if self.activate == 'relu':
            self.ac_fn = nn.ReLU()
        elif self.activate == 'sigmoid':
            self.ac_fn = nn.Sigmoid()
        elif self.activate == 'tanh':
            self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
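# A minimal usage sketch for FCNet (illustrative only; _demo_fcnet is not
# part of the original model). It exercises the dropout -> linear ->
# activation ordering implemented in forward().
def _demo_fcnet():
    layer = FCNet(in_size=4, out_size=8, activate='relu', drop=0.1)
    x = torch.rand(2, 3, 4)  # [batch, regions, feature_size]
    y = layer(x)             # dropout, then linear, then ReLU
    assert y.shape == (2, 3, 8)
    return y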
class OneSideInterModalityUpdate(nn.Module):
"""
one-side Inter-Modality Attention Flow
    according to the paper, instead of running V->Q & Q->V in parallel, we first do V->Q and then Q->V
"""
def __init__(self, src_size, tgt_size, output_size, num_head, drop=0.0):
super(OneSideInterModalityUpdate, self).__init__()
self.src_size = src_size
self.tgt_size = tgt_size
self.output_size = output_size
self.num_head = num_head
        self.src_lin = FCNet(src_size, output_size * 2, drop=drop, activate='relu')
        self.tgt_lin = FCNet(tgt_size, output_size, drop=drop, activate='relu')
        self.tgt_output = FCNet(output_size + tgt_size, output_size, drop=drop, activate='relu')
def forward(self, src, tgt):
"""
:param src: eeg feature [batch, regions, feature_size]
:param tgt: eye feature [batch, regions, feature_size]
:return:
"""
_batch_size, _num_src = src.shape[0], src.shape[1]
        _num_tgt = tgt.shape[1]
src_tran = self.src_lin(src)
tgt_tran = self.tgt_lin(tgt)
src_key, src_val = torch.split(src_tran, src_tran.size(2) // 2, dim=2)
tgt_query = tgt_tran
        src_key_set = torch.split(src_key, src_key.size(2) // self.num_head, dim=2)
        src_val_set = torch.split(src_val, src_val.size(2) // self.num_head, dim=2)
        tgt_query_set = torch.split(tgt_query, tgt_query.size(2) // self.num_head, dim=2)
        for i in range(self.num_head):
            src_key_slice, tgt_query_slice, src_val_slice = src_key_set[i], tgt_query_set[i], src_val_set[i]
            src2tgt = tgt_query_slice @ src_key_slice.transpose(1, 2) / (self.output_size // self.num_head) ** 0.5
            interMAF_src2tgt = F.softmax(src2tgt, dim=2).unsqueeze(3)
            tgt_update = (interMAF_src2tgt * src_val_slice.unsqueeze(1)).sum(2) if i == 0 else torch.cat(
                (tgt_update, (interMAF_src2tgt * src_val_slice.unsqueeze(1)).sum(2)), dim=2)
cat_tgt = torch.cat((tgt, tgt_update), dim=2)
tgt_updated = self.tgt_output(cat_tgt)
return tgt_updated
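# A shape walk-through for OneSideInterModalityUpdate (a sketch, not part of
# the original file). With output_size=4 and num_head=2, src_lin emits 8
# channels that split into a 4-dim key and a 4-dim value, and each head
# attends over 2-dim slices before the per-head results are re-joined.
def _demo_one_side_inter():
    block = OneSideInterModalityUpdate(src_size=4, tgt_size=4, output_size=4, num_head=2)
    src = torch.rand(2, 5, 4)  # e.g. eeg feature [batch, regions, feature]
    tgt = torch.rand(2, 5, 4)  # e.g. eye feature
    out = block(src, tgt)
    # tgt_output maps output_size + tgt_size -> output_size
    assert out.shape == (2, 5, 4)
    return out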
class DyIntraModalityUpdate(nn.Module):
"""
Dynamic Intra-Modality Attention Flow
"""
def __init__(self, v_size, q_size, output_size, num_head, drop=0.0):
super(DyIntraModalityUpdate, self).__init__()
self.v_size = v_size
self.q_size = q_size
self.output_size = output_size
self.num_head = num_head
self.v4q_gate_lin = FCNet(v_size, output_size, drop=drop)
self.q4v_gate_lin = FCNet(q_size, output_size, drop=drop)
self.v_lin = FCNet(v_size, output_size * 3, drop=drop, activate='relu')
self.q_lin = FCNet(q_size, output_size * 3, drop=drop, activate='relu')
        self.v_output = FCNet(output_size, output_size, drop=drop, activate='relu')
        self.q_output = FCNet(output_size, output_size, drop=drop, activate='relu')
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.sigmoid = nn.Sigmoid()
def forward(self, v, q):
"""
:param v: [batch_size, num_obj, feature_size]
:param q: [batch_size, max_len, feature_size]
:return:
"""
_batch_size, num_obj = v.shape[0], v.shape[1]
max_len = q.shape[1]
v_mean = v.sum(1) / num_obj
q_mean = q.sum(1) / max_len
v4q_gate = self.sigmoid(self.v4q_gate_lin(v_mean)).unsqueeze(1)
q4v_gate = self.sigmoid(self.q4v_gate_lin(q_mean)).unsqueeze(1)
v_tran = self.v_lin(v)
q_tran = self.q_lin(q)
v_key, v_query, v_val = torch.split(v_tran, v_tran.size(2) // 3, dim=2)
q_key, q_query, q_val = torch.split(q_tran, q_tran.size(2) // 3, dim=2)
gated_v_query = (1 + q4v_gate) * v_query
gated_v_key = (1 + q4v_gate) * v_key
gated_v_val = (1 + q4v_gate) * v_val
gated_q_query = (1 + v4q_gate) * q_query
gated_q_key = (1 + v4q_gate) * q_key
gated_q_val = (1 + v4q_gate) * q_val
        v_key_set = torch.split(gated_v_key,
            gated_v_key.size(2) // self.num_head, dim=2)
        v_query_set = torch.split(gated_v_query,
            gated_v_query.size(2) // self.num_head, dim=2)
        v_val_set = torch.split(gated_v_val,
            gated_v_val.size(2) // self.num_head, dim=2)
        q_key_set = torch.split(gated_q_key,
            gated_q_key.size(2) // self.num_head, dim=2)
        q_query_set = torch.split(gated_q_query,
            gated_q_query.size(2) // self.num_head, dim=2)
        q_val_set = torch.split(gated_q_val,
            gated_q_val.size(2) // self.num_head, dim=2)
        for i in range(self.num_head):
            v_key_slice, v_query_slice, v_val_slice = (v_key_set[i],
                v_query_set[i], v_val_set[i])
            q_key_slice, q_query_slice, q_val_slice = (q_key_set[i],
                q_query_set[i], q_val_set[i])
            # gated scaled dot-product self-attention within each modality
            v2v = v_query_slice @ v_key_slice.transpose(1, 2) / (
                self.output_size // self.num_head) ** 0.5
            q2q = q_query_slice @ q_key_slice.transpose(1, 2) / (
                self.output_size // self.num_head) ** 0.5
            dyIntranMAF_v2v = F.softmax(v2v, dim=2).unsqueeze(3)
            dyIntranMAF_q2q = F.softmax(q2q, dim=2).unsqueeze(3)
            v_head = (dyIntranMAF_v2v * v_val_slice.unsqueeze(1)).sum(2)
            q_head = (dyIntranMAF_q2q * q_val_slice.unsqueeze(1)).sum(2)
            v_update = v_head if i == 0 else torch.cat((v_update, v_head),
                dim=2)
            q_update = q_head if i == 0 else torch.cat((q_update, q_head),
                dim=2)
updated_v = self.v_output(v + v_update)
updated_q = self.q_output(q + q_update)
return updated_v, updated_q
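# Hedged demo (assumed sizes): note that the residual connections
# `v + v_update` and `q + q_update` require v_size == q_size == output_size,
# which is exactly how SingleBlock below instantiates this module.
def _demo_dy_intra_update():
    block = DyIntraModalityUpdate(v_size=4, q_size=4, output_size=4,
        num_head=2)
    v = torch.rand(4, 4, 4)
    q = torch.rand(4, 4, 4)
    v2, q2 = block(v, q)
    assert v2.shape == (4, 4, 4) and q2.shape == (4, 4, 4)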
class SingleBlock(nn.Module):
"""
    a single inter- and intra-modality block applied num_blocks times; because the same
    module instances are reused at every step, all iterations share one set of parameters
"""
def __init__(self, num_blocks, v_size, q_size, output_size,
num_inter_head, num_intra_head, drop=0.0):
super(SingleBlock, self).__init__()
self.v_size = v_size
self.q_size = q_size
self.output_size = output_size
self.num_inter_head = num_inter_head
self.num_intra_head = num_intra_head
self.num_block = num_blocks
self.v_lin = FCNet(v_size, output_size, drop=drop, activate='relu')
self.q_lin = FCNet(q_size, output_size, drop=drop, activate='relu')
self.v2q_interBlock = OneSideInterModalityUpdate(output_size,
output_size, output_size, num_inter_head, drop)
self.q2v_interBlock = OneSideInterModalityUpdate(output_size,
output_size, output_size, num_inter_head, drop)
self.intraBlock = DyIntraModalityUpdate(output_size, output_size,
output_size, num_intra_head, drop)
def forward(self, v, q):
"""
:param v: eeg feature [batch_size, regions, feature_size]
:param q: eye feature [batch_size, regions, feature_size]
:return:
"""
v = self.v_lin(v)
q = self.q_lin(q)
v_container = [v]
q_container = [q]
result_v = [v]
result_q = [q]
for i in range(self.num_block):
q1 = self.v2q_interBlock(v_container[-1], q_container[-1])
q_container.append(q1)
v1 = self.q2v_interBlock(q_container[-1], v_container[-1])
v_container.append(v1)
v2, q2 = self.intraBlock(v_container[-1] + v_container[-2],
q_container[-1] + q_container[-2])
v_container.append(v2)
q_container.append(q2)
result_v.append(v1)
result_v.append(v2)
result_q.append(q1)
result_q.append(q2)
v_container.append(v_container[-1] + v_container[-2] +
v_container[-3])
q_container.append(q_container[-1] + q_container[-2] +
q_container[-3])
return sum(result_v), sum(result_q)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_blocks': 4, 'v_size': 4, 'q_size': 4, 'output_size':
4, 'num_inter_head': 4, 'num_intra_head': 4}]
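# End-to-end sketch using the harness above (shapes follow get_inputs):
def _demo_single_block():
    init_args, init_kwargs = get_init_inputs()
    model = SingleBlock(*init_args, **init_kwargs)
    v, q = get_inputs()
    out_v, out_q = model(v, q)
    assert out_v.shape == (4, 4, 4) and out_q.shape == (4, 4, 4)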
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_relu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp4, xmask)
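# Note (added annotation): the kernel above fuses the linear bias add and
# ReLU in place, and additionally writes the activation into the first half
# of a (4, 4, 8) concatenation buffer -- hence the strided `x0 + 8 * x1`
# store.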
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_bmm_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
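# Note (added annotation): this and the following fused_bmm helper kernels
# each gather one per-head column (a strided slice such as `4 * x0` or
# `8 * x0`) into a contiguous buffer so the extern batched matmul can
# consume it directly.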
@triton.jit
def triton_poi_fused_bmm_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 8 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
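# Note (added annotation): this softmax kernel only computes the numerically
# stable numerator exp(x - rowmax) over each row of 4 logits; the row sum and
# division (e.g. `tmp5 / tmp11` in triton_poi_fused_cat_8) are fused into the
# downstream cat kernels, which also apply the weights to the value slices
# and concatenate the per-head outputs.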
@triton.jit
def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 8 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = xindex // 2
x2 = xindex // 8
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x3, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp5 / tmp11
tmp13 = tl.load(in_ptr1 + (4 + 32 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp6 / tmp11
tmp16 = tl.load(in_ptr1 + (12 + 32 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp8 / tmp11
tmp20 = tl.load(in_ptr1 + (20 + 32 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp10 / tmp11
tmp24 = tl.load(in_ptr1 + (28 + 32 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp32 = tl.load(in_ptr2 + 4 * x3, tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (1 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.load(in_ptr2 + (2 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr2 + (3 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp32 / tmp38
tmp40 = tl.load(in_ptr1 + (5 + 32 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp33 / tmp38
tmp43 = tl.load(in_ptr1 + (13 + 32 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp35 / tmp38
tmp47 = tl.load(in_ptr1 + (21 + 32 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp37 / tmp38
tmp51 = tl.load(in_ptr1 + (29 + 32 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp29, tmp53, tmp54)
tmp56 = tl.where(tmp4, tmp28, tmp55)
tl.store(out_ptr0 + x5, tmp56, xmask)
@triton.jit
def triton_poi_fused_bmm_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 8 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = xindex // 3
x2 = xindex // 12
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 3, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x3, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (6 + 32 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (14 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (22 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (30 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + x5, tmp33, xmask)
@triton.jit
def triton_poi_fused_bmm_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_13(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 8 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x2 = xindex // 16
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (3 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x3, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (7 + 32 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (15 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (23 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (31 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + (x0 + 8 * x3), tmp33, xmask)
@triton.jit
def triton_poi_fused_add_relu_15(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_div_relu_sum_16(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp10 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp16 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp22 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tmp8 = tmp7 + tmp1
tmp9 = triton_helpers.maximum(tmp3, tmp8)
tmp11 = tmp9 + tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp17 = tmp15 + tmp16
tmp18 = tmp12 + tmp17
tmp20 = tmp19 + tmp1
tmp21 = triton_helpers.maximum(tmp3, tmp20)
tmp23 = tmp21 + tmp22
tmp24 = tmp18 + tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tl.store(in_out_ptr0 + x2, tmp26, xmask)
@triton.jit
def triton_poi_fused_add_div_sum_17(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_relu_18(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_19(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_mul_20(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr1 + (4 + x0 + 12 * x3), xmask)
tmp6 = tl.load(in_ptr1 + (x0 + 12 * x3), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + 12 * x3), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp3 * tmp6
tmp9 = tmp3 * tmp8
tl.store(out_ptr0 + x4, tmp5, xmask)
tl.store(out_ptr1 + x4, tmp7, xmask)
tl.store(out_ptr2 + x4, tmp9, xmask)
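# Note (added annotation): this kernel is the fused form of the Python-level
# gating `(1 + sigmoid(gate)) * {key, query, val}` in DyIntraModalityUpdate,
# emitting all three gated projections of the packed (12-wide) linear output
# in a single pass.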
@triton.jit
def triton_poi_fused_cat_21(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = xindex // 2
x2 = xindex // 8
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x3, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp5 / tmp11
tmp13 = tl.load(in_ptr1 + 16 * x2, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp6 / tmp11
tmp16 = tl.load(in_ptr1 + (4 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp8 / tmp11
tmp20 = tl.load(in_ptr1 + (8 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp10 / tmp11
tmp24 = tl.load(in_ptr1 + (12 + 16 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp32 = tl.load(in_ptr2 + 4 * x3, tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (1 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.load(in_ptr2 + (2 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr2 + (3 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp32 / tmp38
tmp40 = tl.load(in_ptr1 + (1 + 16 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp33 / tmp38
tmp43 = tl.load(in_ptr1 + (5 + 16 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp35 / tmp38
tmp47 = tl.load(in_ptr1 + (9 + 16 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp37 / tmp38
tmp51 = tl.load(in_ptr1 + (13 + 16 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp29, tmp53, tmp54)
tmp56 = tl.where(tmp4, tmp28, tmp55)
tl.store(out_ptr0 + x5, tmp56, xmask)
@triton.jit
def triton_poi_fused_cat_22(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = xindex // 3
x2 = xindex // 12
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 3, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x3, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (2 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (6 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (10 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (14 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + x5, tmp33, xmask)
@triton.jit
def triton_poi_fused_add_cat_23(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex // 4
x2 = xindex // 16
x3 = xindex
tmp34 = tl.load(in_ptr3 + x3, xmask)
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (3 * x4 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x4, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x4), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x4), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x4), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (3 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (7 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (11 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (15 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tmp35 = tmp34 + tmp33
tl.store(in_out_ptr0 + x3, tmp35, xmask)
@triton.jit
def triton_poi_fused_add_cat_relu_24(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp7 = tmp5 + tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp9 = tmp4 + tmp8
tmp11 = tmp9 + tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
tl.store(out_ptr1 + (x0 + 8 * x1), tmp11, xmask)
@triton.jit
def triton_poi_fused_add_cat_relu_25(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x2, xmask)
tmp7 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + (x0 + 8 * x1), tmp8, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_26(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, in_ptr10, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4,
out_ptr5, out_ptr6, out_ptr7, out_ptr8, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x2, xmask)
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x2, xmask)
tmp18 = tl.load(in_ptr6 + x2, xmask)
tmp22 = tl.load(in_ptr7 + x2, xmask)
tmp26 = tl.load(in_ptr8 + x2, xmask)
tmp30 = tl.load(in_ptr9 + x2, xmask)
tmp34 = tl.load(in_ptr10 + x2, xmask)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp2 + tmp7
tmp11 = tmp9 + tmp10
tmp12 = triton_helpers.maximum(tmp6, tmp11)
tmp13 = tmp8 + tmp12
tmp15 = tmp14 + tmp4
tmp16 = triton_helpers.maximum(tmp6, tmp15)
tmp17 = tmp13 + tmp16
tmp19 = tmp18 + tmp10
tmp20 = triton_helpers.maximum(tmp6, tmp19)
tmp21 = tmp17 + tmp20
tmp23 = tmp22 + tmp4
tmp24 = triton_helpers.maximum(tmp6, tmp23)
tmp25 = tmp21 + tmp24
tmp27 = tmp26 + tmp10
tmp28 = triton_helpers.maximum(tmp6, tmp27)
tmp29 = tmp25 + tmp28
tmp31 = tmp30 + tmp4
tmp32 = triton_helpers.maximum(tmp6, tmp31)
tmp33 = tmp29 + tmp32
tmp35 = tmp34 + tmp10
tmp36 = triton_helpers.maximum(tmp6, tmp35)
tmp37 = tmp33 + tmp36
tmp38 = tmp36 <= tmp1
tmp39 = tmp32 <= tmp1
tmp40 = tmp28 <= tmp1
tmp41 = tmp24 <= tmp1
tmp42 = tmp20 <= tmp1
tmp43 = tmp16 <= tmp1
tmp44 = tmp12 <= tmp1
tmp45 = tmp7 <= tmp1
tmp46 = tmp0 <= tmp1
tl.store(in_out_ptr0 + x2, tmp37, xmask)
tl.store(out_ptr0 + x2, tmp38, xmask)
tl.store(out_ptr1 + x2, tmp39, xmask)
tl.store(out_ptr2 + x2, tmp40, xmask)
tl.store(out_ptr3 + x2, tmp41, xmask)
tl.store(out_ptr4 + x2, tmp42, xmask)
tl.store(out_ptr5 + x2, tmp43, xmask)
tl.store(out_ptr6 + x2, tmp44, xmask)
tl.store(out_ptr7 + x2, tmp45, xmask)
tl.store(out_ptr8 + x2, tmp46, xmask)
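# Note (added annotation): the `relu_threshold_backward` kernels emit boolean
# masks (activation <= 0) alongside the forward values; autograd later uses
# these masks to zero gradients where the ReLU was inactive.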
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_27(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
out_ptr6, out_ptr7, out_ptr8, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, xmask)
tmp13 = tl.load(in_ptr5 + x2, xmask)
tmp17 = tl.load(in_ptr6 + x2, xmask)
tmp19 = tl.load(in_ptr7 + x2, xmask)
tmp23 = tl.load(in_ptr8 + x2, xmask)
tmp25 = tl.load(in_ptr9 + x2, xmask)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tmp4 + tmp9
tmp12 = tmp10 + tmp11
tmp14 = tmp13 + tmp6
tmp15 = triton_helpers.maximum(tmp8, tmp14)
tmp16 = tmp12 + tmp15
tmp18 = tmp16 + tmp17
tmp20 = tmp19 + tmp6
tmp21 = triton_helpers.maximum(tmp8, tmp20)
tmp22 = tmp18 + tmp21
tmp24 = tmp22 + tmp23
tmp26 = tmp25 + tmp6
tmp27 = triton_helpers.maximum(tmp8, tmp26)
tmp28 = tmp24 + tmp27
tmp29 = tmp27 <= tmp1
tmp30 = tmp21 <= tmp1
tmp31 = tmp15 <= tmp1
tmp32 = tmp9 <= tmp1
tmp33 = tmp23 <= tmp1
tmp34 = tmp17 <= tmp1
tmp35 = tmp11 <= tmp1
tmp36 = tmp3 <= tmp1
tmp37 = tmp0 <= tmp1
tl.store(in_out_ptr0 + x2, tmp28, xmask)
tl.store(out_ptr0 + x2, tmp29, xmask)
tl.store(out_ptr1 + x2, tmp30, xmask)
tl.store(out_ptr2 + x2, tmp31, xmask)
tl.store(out_ptr3 + x2, tmp32, xmask)
tl.store(out_ptr4 + x2, tmp33, xmask)
tl.store(out_ptr5 + x2, tmp34, xmask)
tl.store(out_ptr6 + x2, tmp35, xmask)
tl.store(out_ptr7 + x2, tmp36, xmask)
tl.store(out_ptr8 + x2, tmp37, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (8, 4), (4, 1))
assert_size_stride(primals_8, (8,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 8), (8, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (8, 4), (4, 1))
assert_size_stride(primals_14, (8,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 8), (8, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4, 4), (4, 1))
assert_size_stride(primals_20, (4,), (1,))
assert_size_stride(primals_21, (4, 4), (4, 1))
assert_size_stride(primals_22, (4,), (1,))
assert_size_stride(primals_23, (12, 4), (4, 1))
assert_size_stride(primals_24, (12,), (1,))
assert_size_stride(primals_25, (12, 4), (4, 1))
assert_size_stride(primals_26, (12,), (1,))
assert_size_stride(primals_27, (4, 4), (4, 1))
assert_size_stride(primals_28, (4,), (1,))
assert_size_stride(primals_29, (4, 4), (4, 1))
assert_size_stride(primals_30, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
buf55 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf54 = reinterpret_tensor(buf55, (4, 4, 4), (32, 8, 1), 0)
get_raw_stream(0)
triton_poi_fused_cat_relu_0[grid(64)](buf2, primals_2, buf54, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 8), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
buf28 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf27 = reinterpret_tensor(buf28, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_cat_relu_0[grid(64)](buf4, primals_5, buf27, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0)
del buf5
buf500 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf6,
primals_10, buf500, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf3, (4, 4, 8), (32, 8, 1), 0)
del buf3
buf501 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(128)](buf7,
primals_8, buf501, 128, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf6, buf8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_bmm_4[grid(16)](buf7, buf9, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf8, buf9, out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf10, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf9, (4, 4, 1), (4, 1, 16), 0)
del buf9
triton_poi_fused_bmm_6[grid(16)](buf6, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf8, (4, 1, 4), (4, 16, 1), 0)
del buf8
triton_poi_fused_bmm_7[grid(16)](buf7, buf13, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf12, buf13, out=buf14)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf14, buf15, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_8[grid(32)](buf11, buf7, buf15, buf16, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf17 = reinterpret_tensor(buf13, (4, 4, 1), (4, 1, 16), 0)
del buf13
triton_poi_fused_bmm_9[grid(16)](buf6, buf17, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf18 = reinterpret_tensor(buf12, (4, 1, 4), (4, 16, 1), 0)
del buf12
triton_poi_fused_bmm_10[grid(16)](buf7, buf18, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf19 = buf15
del buf15
extern_kernels.bmm(buf17, buf18, out=buf19)
buf20 = buf11
del buf11
triton_poi_fused__softmax_5[grid(64)](buf19, buf20, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf21 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
triton_poi_fused_cat_11[grid(48)](buf16, buf20, buf7, buf21, 48,
XBLOCK=64, num_warps=1, num_stages=1)
buf22 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0)
del buf18
triton_poi_fused_bmm_12[grid(16)](buf6, buf22, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf23 = reinterpret_tensor(buf17, (4, 1, 4), (4, 16, 1), 0)
del buf17
triton_poi_fused_bmm_13[grid(16)](buf7, buf23, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf24 = buf20
del buf20
extern_kernels.bmm(buf22, buf23, out=buf24)
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf24, buf25, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf26 = reinterpret_tensor(buf28, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_14[grid(64)](buf21, buf25, buf7, buf26, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf29 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0)
del buf25
extern_kernels.mm(reinterpret_tensor(buf28, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), out=buf29)
buf30 = reinterpret_tensor(buf29, (4, 4, 4), (16, 4, 1), 0)
del buf29
buf64 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_relu_15[grid(64)](buf30, primals_12, buf4,
buf64, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf31 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf30, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 8), (1, 4), 0), out=buf31)
buf32 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf32)
buf33 = reinterpret_tensor(buf32, (4, 4, 4), (16, 4, 1), 0)
del buf32
buf497 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf33,
primals_16, buf497, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf34 = reinterpret_tensor(buf31, (4, 4, 8), (32, 8, 1), 0)
del buf31
buf498 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(128)](buf34,
primals_14, buf498, 128, XBLOCK=128, num_warps=4, num_stages=1)
buf35 = reinterpret_tensor(buf23, (4, 4, 1), (4, 1, 16), 0)
del buf23
triton_poi_fused_bmm_3[grid(16)](buf33, buf35, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf36 = reinterpret_tensor(buf22, (4, 1, 4), (4, 16, 1), 0)
del buf22
triton_poi_fused_bmm_4[grid(16)](buf34, buf36, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf37 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf35, buf36, out=buf37)
buf38 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf37, buf38, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf39 = reinterpret_tensor(buf36, (4, 4, 1), (4, 1, 16), 0)
del buf36
triton_poi_fused_bmm_6[grid(16)](buf33, buf39, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf40 = reinterpret_tensor(buf35, (4, 1, 4), (4, 16, 1), 0)
del buf35
triton_poi_fused_bmm_7[grid(16)](buf34, buf40, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf41 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf39, buf40, out=buf41)
buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf41, buf42, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf43 = buf16
del buf16
triton_poi_fused_cat_8[grid(32)](buf38, buf34, buf42, buf43, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf44 = reinterpret_tensor(buf40, (4, 4, 1), (4, 1, 16), 0)
del buf40
triton_poi_fused_bmm_9[grid(16)](buf33, buf44, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf45 = reinterpret_tensor(buf39, (4, 1, 4), (4, 16, 1), 0)
del buf39
triton_poi_fused_bmm_10[grid(16)](buf34, buf45, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf46 = buf42
del buf42
extern_kernels.bmm(buf44, buf45, out=buf46)
buf47 = buf38
del buf38
triton_poi_fused__softmax_5[grid(64)](buf46, buf47, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf48 = buf21
del buf21
triton_poi_fused_cat_11[grid(48)](buf43, buf47, buf34, buf48, 48,
XBLOCK=64, num_warps=1, num_stages=1)
buf49 = reinterpret_tensor(buf45, (4, 4, 1), (4, 1, 16), 0)
del buf45
triton_poi_fused_bmm_12[grid(16)](buf33, buf49, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf50 = reinterpret_tensor(buf44, (4, 1, 4), (4, 16, 1), 0)
del buf44
triton_poi_fused_bmm_13[grid(16)](buf34, buf50, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf51 = buf47
del buf47
extern_kernels.bmm(buf49, buf50, out=buf51)
buf52 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf51, buf52, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf53 = reinterpret_tensor(buf55, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_14[grid(64)](buf48, buf52, buf34, buf53, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf56 = reinterpret_tensor(buf52, (16, 4), (4, 1), 0)
del buf52
extern_kernels.mm(reinterpret_tensor(buf55, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_17, (8, 4), (1, 8), 0), out=buf56)
buf57 = reinterpret_tensor(buf50, (4, 4), (4, 1), 0)
del buf50
buf58 = buf57
del buf57
triton_poi_fused_add_div_relu_sum_16[grid(16)](buf58, buf56,
primals_18, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf59 = reinterpret_tensor(buf49, (4, 4), (4, 1), 0)
del buf49
triton_poi_fused_add_div_sum_17[grid(16)](buf30, buf4, buf59, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf60 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_20, buf58, reinterpret_tensor(
primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf60)
buf61 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_22, buf59, reinterpret_tensor(
primals_21, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf61)
buf62 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_relu_18[grid(64)](buf56, primals_18, buf2,
buf62, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf63 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf62, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf63)
buf65 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf64, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_25, (4, 12), (1, 4), 0), out=buf65)
buf66 = reinterpret_tensor(buf63, (4, 4, 12), (48, 12, 1), 0)
del buf63
buf495 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_19[grid(192)](buf66,
primals_24, buf495, 192, XBLOCK=128, num_warps=4, num_stages=1)
buf67 = reinterpret_tensor(buf65, (4, 4, 12), (48, 12, 1), 0)
del buf65
buf494 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_19[grid(192)](buf67,
primals_26, buf494, 192, XBLOCK=128, num_warps=4, num_stages=1)
buf68 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf69 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf80 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_20[grid(64)](buf61, buf66, buf68, buf69,
buf80, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf70 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf68, buf70, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf71 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf69, buf71, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf72 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf70, buf71, out=buf72)
buf73 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf74 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf81 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_20[grid(64)](buf60, buf67, buf73, buf74,
buf81, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf75 = reinterpret_tensor(buf71, (4, 4, 1), (4, 1, 16), 0)
del buf71
triton_poi_fused_bmm_3[grid(16)](buf73, buf75, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf76 = reinterpret_tensor(buf70, (4, 1, 4), (4, 16, 1), 0)
del buf70
triton_poi_fused_bmm_3[grid(16)](buf74, buf76, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf77 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf75, buf76, out=buf77)
buf78 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf72, buf78, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf79 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf77, buf79, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf82 = reinterpret_tensor(buf76, (4, 4, 1), (4, 1, 16), 0)
del buf76
triton_poi_fused_bmm_6[grid(16)](buf68, buf82, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf83 = reinterpret_tensor(buf75, (4, 1, 4), (4, 16, 1), 0)
del buf75
triton_poi_fused_bmm_6[grid(16)](buf69, buf83, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf84 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf82, buf83, out=buf84)
buf85 = reinterpret_tensor(buf83, (4, 4, 1), (4, 1, 16), 0)
del buf83
triton_poi_fused_bmm_6[grid(16)](buf73, buf85, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf86 = reinterpret_tensor(buf82, (4, 1, 4), (4, 16, 1), 0)
del buf82
triton_poi_fused_bmm_6[grid(16)](buf74, buf86, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf87 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf85, buf86, out=buf87)
buf88 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf84, buf88, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf89 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf87, buf89, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf90 = buf43
del buf43
triton_poi_fused_cat_21[grid(32)](buf78, buf80, buf88, buf90, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf91 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_21[grid(32)](buf79, buf81, buf89, buf91, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf92 = reinterpret_tensor(buf86, (4, 4, 1), (4, 1, 16), 0)
del buf86
triton_poi_fused_bmm_9[grid(16)](buf68, buf92, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf93 = reinterpret_tensor(buf85, (4, 1, 4), (4, 16, 1), 0)
del buf85
triton_poi_fused_bmm_9[grid(16)](buf69, buf93, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf94 = buf89
del buf89
extern_kernels.bmm(buf92, buf93, out=buf94)
buf95 = reinterpret_tensor(buf93, (4, 4, 1), (4, 1, 16), 0)
del buf93
triton_poi_fused_bmm_9[grid(16)](buf73, buf95, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf96 = reinterpret_tensor(buf92, (4, 1, 4), (4, 16, 1), 0)
del buf92
triton_poi_fused_bmm_9[grid(16)](buf74, buf96, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf97 = buf79
del buf79
extern_kernels.bmm(buf95, buf96, out=buf97)
buf98 = buf88
del buf88
triton_poi_fused__softmax_5[grid(64)](buf94, buf98, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf99 = buf78
del buf78
triton_poi_fused__softmax_5[grid(64)](buf97, buf99, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf100 = buf48
del buf48
triton_poi_fused_cat_22[grid(48)](buf90, buf98, buf80, buf100, 48,
XBLOCK=64, num_warps=1, num_stages=1)
buf101 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
triton_poi_fused_cat_22[grid(48)](buf91, buf99, buf81, buf101, 48,
XBLOCK=64, num_warps=1, num_stages=1)
buf102 = reinterpret_tensor(buf96, (4, 4, 1), (4, 1, 16), 0)
del buf96
triton_poi_fused_bmm_12[grid(16)](buf68, buf102, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf103 = reinterpret_tensor(buf95, (4, 1, 4), (4, 16, 1), 0)
del buf95
triton_poi_fused_bmm_12[grid(16)](buf69, buf103, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf104 = buf99
del buf99
extern_kernels.bmm(buf102, buf103, out=buf104)
buf105 = reinterpret_tensor(buf103, (4, 4, 1), (4, 1, 16), 0)
del buf103
triton_poi_fused_bmm_12[grid(16)](buf73, buf105, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf106 = reinterpret_tensor(buf102, (4, 1, 4), (4, 16, 1), 0)
del buf102
triton_poi_fused_bmm_12[grid(16)](buf74, buf106, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf107 = buf98
del buf98
extern_kernels.bmm(buf105, buf106, out=buf107)
buf108 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf104, buf108, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf109 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf107, buf109, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf110 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf112 = buf110
del buf110
triton_poi_fused_add_cat_23[grid(64)](buf112, buf100, buf108, buf80,
buf62, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf111 = buf108
del buf108
buf114 = buf111
del buf111
triton_poi_fused_add_cat_23[grid(64)](buf114, buf101, buf109, buf81,
buf64, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf113 = reinterpret_tensor(buf109, (16, 4), (4, 1), 0)
del buf109
extern_kernels.mm(reinterpret_tensor(buf112, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf113)
buf115 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf114, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_29, (4, 4), (1, 4), 0), out=buf115)
buf116 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf169 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf168 = reinterpret_tensor(buf169, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_add_cat_relu_24[grid(64)](buf113, primals_28,
buf56, primals_18, buf2, buf116, buf168, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf117 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf116, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 8), (1, 4), 0), out=buf117)
buf118 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf142 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf141 = reinterpret_tensor(buf142, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_add_cat_relu_25[grid(64)](buf115, primals_30,
buf30, buf4, buf118, buf141, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf119 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf118, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf119)
buf120 = reinterpret_tensor(buf119, (4, 4, 4), (16, 4, 1), 0)
del buf119
buf490 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf120,
primals_10, buf490, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf121 = reinterpret_tensor(buf117, (4, 4, 8), (32, 8, 1), 0)
del buf117
buf491 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(128)](buf121,
primals_8, buf491, 128, XBLOCK=128, num_warps=4, num_stages=1)
buf122 = reinterpret_tensor(buf106, (4, 4, 1), (4, 1, 16), 0)
del buf106
triton_poi_fused_bmm_3[grid(16)](buf120, buf122, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf123 = reinterpret_tensor(buf105, (4, 1, 4), (4, 16, 1), 0)
del buf105
triton_poi_fused_bmm_4[grid(16)](buf121, buf123, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf124 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf122, buf123, out=buf124)
buf125 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf124, buf125, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf126 = reinterpret_tensor(buf123, (4, 4, 1), (4, 1, 16), 0)
del buf123
triton_poi_fused_bmm_6[grid(16)](buf120, buf126, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf127 = reinterpret_tensor(buf122, (4, 1, 4), (4, 16, 1), 0)
del buf122
triton_poi_fused_bmm_7[grid(16)](buf121, buf127, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf128 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf126, buf127, out=buf128)
buf129 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf128, buf129, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf130 = buf91
del buf91
triton_poi_fused_cat_8[grid(32)](buf125, buf121, buf129, buf130, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf131 = reinterpret_tensor(buf127, (4, 4, 1), (4, 1, 16), 0)
del buf127
triton_poi_fused_bmm_9[grid(16)](buf120, buf131, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf132 = reinterpret_tensor(buf126, (4, 1, 4), (4, 16, 1), 0)
del buf126
triton_poi_fused_bmm_10[grid(16)](buf121, buf132, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf133 = buf129
del buf129
extern_kernels.bmm(buf131, buf132, out=buf133)
buf134 = buf125
del buf125
triton_poi_fused__softmax_5[grid(64)](buf133, buf134, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf135 = buf101
del buf101
triton_poi_fused_cat_11[grid(48)](buf130, buf134, buf121, buf135,
48, XBLOCK=64, num_warps=1, num_stages=1)
buf136 = reinterpret_tensor(buf132, (4, 4, 1), (4, 1, 16), 0)
del buf132
triton_poi_fused_bmm_12[grid(16)](buf120, buf136, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf137 = reinterpret_tensor(buf131, (4, 1, 4), (4, 16, 1), 0)
del buf131
triton_poi_fused_bmm_13[grid(16)](buf121, buf137, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf138 = buf134
del buf134
extern_kernels.bmm(buf136, buf137, out=buf138)
buf139 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf138, buf139, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf140 = reinterpret_tensor(buf142, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_14[grid(64)](buf135, buf139, buf121, buf140,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf143 = reinterpret_tensor(buf139, (16, 4), (4, 1), 0)
del buf139
extern_kernels.mm(reinterpret_tensor(buf142, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), out=buf143)
buf144 = reinterpret_tensor(buf143, (4, 4, 4), (16, 4, 1), 0)
del buf143
buf178 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_relu_15[grid(64)](buf144, primals_12, buf118,
buf178, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf145 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf144, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 8), (1, 4), 0), out=buf145)
buf146 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf116, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf146)
buf147 = reinterpret_tensor(buf146, (4, 4, 4), (16, 4, 1), 0)
del buf146
buf487 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf147,
primals_16, buf487, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf148 = reinterpret_tensor(buf145, (4, 4, 8), (32, 8, 1), 0)
del buf145
buf488 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(128)](buf148,
primals_14, buf488, 128, XBLOCK=128, num_warps=4, num_stages=1)
buf149 = reinterpret_tensor(buf137, (4, 4, 1), (4, 1, 16), 0)
del buf137
triton_poi_fused_bmm_3[grid(16)](buf147, buf149, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf150 = reinterpret_tensor(buf136, (4, 1, 4), (4, 16, 1), 0)
del buf136
triton_poi_fused_bmm_4[grid(16)](buf148, buf150, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf151 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf149, buf150, out=buf151)
buf152 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf151, buf152, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf153 = reinterpret_tensor(buf150, (4, 4, 1), (4, 1, 16), 0)
del buf150
triton_poi_fused_bmm_6[grid(16)](buf147, buf153, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf154 = reinterpret_tensor(buf149, (4, 1, 4), (4, 16, 1), 0)
del buf149
triton_poi_fused_bmm_7[grid(16)](buf148, buf154, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf155 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf153, buf154, out=buf155)
buf156 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf155, buf156, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf157 = buf130
del buf130
triton_poi_fused_cat_8[grid(32)](buf152, buf148, buf156, buf157, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf158 = reinterpret_tensor(buf154, (4, 4, 1), (4, 1, 16), 0)
del buf154
triton_poi_fused_bmm_9[grid(16)](buf147, buf158, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf159 = reinterpret_tensor(buf153, (4, 1, 4), (4, 16, 1), 0)
del buf153
triton_poi_fused_bmm_10[grid(16)](buf148, buf159, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf160 = buf156
del buf156
extern_kernels.bmm(buf158, buf159, out=buf160)
buf161 = buf152
del buf152
triton_poi_fused__softmax_5[grid(64)](buf160, buf161, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf162 = buf135
del buf135
triton_poi_fused_cat_11[grid(48)](buf157, buf161, buf148, buf162,
48, XBLOCK=64, num_warps=1, num_stages=1)
buf163 = reinterpret_tensor(buf159, (4, 4, 1), (4, 1, 16), 0)
del buf159
triton_poi_fused_bmm_12[grid(16)](buf147, buf163, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf164 = reinterpret_tensor(buf158, (4, 1, 4), (4, 16, 1), 0)
del buf158
triton_poi_fused_bmm_13[grid(16)](buf148, buf164, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf165 = buf161
del buf161
extern_kernels.bmm(buf163, buf164, out=buf165)
buf166 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf165, buf166, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf167 = reinterpret_tensor(buf169, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_14[grid(64)](buf162, buf166, buf148, buf167,
64, XBLOCK=64, num_warps=1, num_stages=1)
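    # NOTE: after the primals_17 projection, the two streams appear to be
    # mean-pooled over the sequence dimension (the fused add_div_*_sum
    # kernels sum and divide), and the pooled vectors feed the gate linears
    # primals_19/primals_20 and primals_21/primals_22 via addmm.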
buf170 = reinterpret_tensor(buf166, (16, 4), (4, 1), 0)
del buf166
extern_kernels.mm(reinterpret_tensor(buf169, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_17, (8, 4), (1, 8), 0), out=buf170)
buf171 = reinterpret_tensor(buf164, (4, 4), (4, 1), 0)
del buf164
buf172 = buf171
del buf171
triton_poi_fused_add_div_relu_sum_16[grid(16)](buf172, buf170,
primals_18, buf116, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf173 = reinterpret_tensor(buf163, (4, 4), (4, 1), 0)
del buf163
triton_poi_fused_add_div_sum_17[grid(16)](buf144, buf118, buf173,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf174 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_20, buf172, reinterpret_tensor(
primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf174)
buf175 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_22, buf173, reinterpret_tensor(
primals_21, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf175)
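    # NOTE: buf176 below (add_relu_18) re-materializes the residual
    # activation, mirroring buf178 above; both are expanded to width 12 via
    # primals_23/primals_25, and the three width-4 slices of buf180/buf181
    # later serve as Q/K/V views (see the (48, 12, 1)-strided
    # reinterpret_tensor calls in the return tuple).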
buf176 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_relu_18[grid(64)](buf170, primals_18, buf116,
buf176, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf177 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf176, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf177)
buf179 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf178, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_25, (4, 12), (1, 4), 0), out=buf179)
buf180 = reinterpret_tensor(buf177, (4, 4, 12), (48, 12, 1), 0)
del buf177
buf485 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_19[grid(192)](buf180,
primals_24, buf485, 192, XBLOCK=128, num_warps=4, num_stages=1)
buf181 = reinterpret_tensor(buf179, (4, 4, 12), (48, 12, 1), 0)
del buf179
buf484 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_19[grid(192)](buf181,
primals_26, buf484, 192, XBLOCK=128, num_warps=4, num_stages=1)
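    # NOTE: cross-stream attention. add_mul_20 appears to gate each stream
    # with its pooled vector (buf175/buf174) and a slice of the width-12
    # projection, emitting query (buf182/buf187), key (buf183/buf188) and
    # value (buf194/buf195) tensors; four heads per direction follow, and
    # cat_21/cat_22/add_cat_23 accumulate both directions symmetrically.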
buf182 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf183 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf194 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_20[grid(64)](buf175, buf180, buf182,
buf183, buf194, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf184 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf182, buf184, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf185 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf183, buf185, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf186 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf184, buf185, out=buf186)
buf187 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf188 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf195 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_20[grid(64)](buf174, buf181, buf187,
buf188, buf195, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf189 = reinterpret_tensor(buf185, (4, 4, 1), (4, 1, 16), 0)
del buf185
triton_poi_fused_bmm_3[grid(16)](buf187, buf189, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf190 = reinterpret_tensor(buf184, (4, 1, 4), (4, 16, 1), 0)
del buf184
triton_poi_fused_bmm_3[grid(16)](buf188, buf190, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf191 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf189, buf190, out=buf191)
buf192 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf186, buf192, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf193 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf191, buf193, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf196 = reinterpret_tensor(buf190, (4, 4, 1), (4, 1, 16), 0)
del buf190
triton_poi_fused_bmm_6[grid(16)](buf182, buf196, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf197 = reinterpret_tensor(buf189, (4, 1, 4), (4, 16, 1), 0)
del buf189
triton_poi_fused_bmm_6[grid(16)](buf183, buf197, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf198 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf196, buf197, out=buf198)
buf199 = reinterpret_tensor(buf197, (4, 4, 1), (4, 1, 16), 0)
del buf197
triton_poi_fused_bmm_6[grid(16)](buf187, buf199, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf200 = reinterpret_tensor(buf196, (4, 1, 4), (4, 16, 1), 0)
del buf196
triton_poi_fused_bmm_6[grid(16)](buf188, buf200, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf201 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf199, buf200, out=buf201)
buf202 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf198, buf202, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf203 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf201, buf203, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf204 = buf157
del buf157
triton_poi_fused_cat_21[grid(32)](buf192, buf194, buf202, buf204,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf205 = buf90
del buf90
triton_poi_fused_cat_21[grid(32)](buf193, buf195, buf203, buf205,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf206 = reinterpret_tensor(buf200, (4, 4, 1), (4, 1, 16), 0)
del buf200
triton_poi_fused_bmm_9[grid(16)](buf182, buf206, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf207 = reinterpret_tensor(buf199, (4, 1, 4), (4, 16, 1), 0)
del buf199
triton_poi_fused_bmm_9[grid(16)](buf183, buf207, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf208 = buf203
del buf203
extern_kernels.bmm(buf206, buf207, out=buf208)
buf209 = reinterpret_tensor(buf207, (4, 4, 1), (4, 1, 16), 0)
del buf207
triton_poi_fused_bmm_9[grid(16)](buf187, buf209, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf210 = reinterpret_tensor(buf206, (4, 1, 4), (4, 16, 1), 0)
del buf206
triton_poi_fused_bmm_9[grid(16)](buf188, buf210, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf211 = buf193
del buf193
extern_kernels.bmm(buf209, buf210, out=buf211)
buf212 = buf202
del buf202
triton_poi_fused__softmax_5[grid(64)](buf208, buf212, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf213 = buf192
del buf192
triton_poi_fused__softmax_5[grid(64)](buf211, buf213, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf214 = buf162
del buf162
triton_poi_fused_cat_22[grid(48)](buf204, buf212, buf194, buf214,
48, XBLOCK=64, num_warps=1, num_stages=1)
buf215 = buf100
del buf100
triton_poi_fused_cat_22[grid(48)](buf205, buf213, buf195, buf215,
48, XBLOCK=64, num_warps=1, num_stages=1)
buf216 = reinterpret_tensor(buf210, (4, 4, 1), (4, 1, 16), 0)
del buf210
triton_poi_fused_bmm_12[grid(16)](buf182, buf216, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf217 = reinterpret_tensor(buf209, (4, 1, 4), (4, 16, 1), 0)
del buf209
triton_poi_fused_bmm_12[grid(16)](buf183, buf217, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf218 = buf213
del buf213
extern_kernels.bmm(buf216, buf217, out=buf218)
buf219 = reinterpret_tensor(buf217, (4, 4, 1), (4, 1, 16), 0)
del buf217
triton_poi_fused_bmm_12[grid(16)](buf187, buf219, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf220 = reinterpret_tensor(buf216, (4, 1, 4), (4, 16, 1), 0)
del buf216
triton_poi_fused_bmm_12[grid(16)](buf188, buf220, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf221 = buf212
del buf212
extern_kernels.bmm(buf219, buf220, out=buf221)
buf222 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf218, buf222, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf223 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf221, buf223, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf224 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf226 = buf224
del buf224
triton_poi_fused_add_cat_23[grid(64)](buf226, buf214, buf222,
buf194, buf176, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf225 = buf222
del buf222
buf228 = buf225
del buf225
triton_poi_fused_add_cat_23[grid(64)](buf228, buf215, buf223,
buf195, buf178, 64, XBLOCK=64, num_warps=1, num_stages=1)
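    # NOTE: per-stream output projections (primals_27/primals_29); the fused
    # add_cat_relu_24/_25 kernels both close this fusion block and write the
    # first half of the concat buffers (buf283/buf256) consumed by the next
    # iteration's attention.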
buf227 = reinterpret_tensor(buf223, (16, 4), (4, 1), 0)
del buf223
extern_kernels.mm(reinterpret_tensor(buf226, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf227)
buf229 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf228, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_29, (4, 4), (1, 4), 0), out=buf229)
buf230 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf283 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf282 = reinterpret_tensor(buf283, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_add_cat_relu_24[grid(64)](buf227, primals_28,
buf170, primals_18, buf116, buf230, buf282, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf231 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf230, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 8), (1, 4), 0), out=buf231)
buf232 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf256 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf255 = reinterpret_tensor(buf256, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_add_cat_relu_25[grid(64)](buf229, primals_30,
buf144, buf118, buf232, buf255, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf233 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf232, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf233)
buf234 = reinterpret_tensor(buf233, (4, 4, 4), (16, 4, 1), 0)
del buf233
buf480 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf234,
primals_10, buf480, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf235 = reinterpret_tensor(buf231, (4, 4, 8), (32, 8, 1), 0)
del buf231
buf481 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(128)](buf235,
primals_8, buf481, 128, XBLOCK=128, num_warps=4, num_stages=1)
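    # NOTE: second iteration of the fusion block. The kernel sequence below
    # (four-head self-attention, projection, pooling, cross-attention) is
    # identical to the first pass, now operating on buf234/buf235.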
buf236 = reinterpret_tensor(buf220, (4, 4, 1), (4, 1, 16), 0)
del buf220
triton_poi_fused_bmm_3[grid(16)](buf234, buf236, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf237 = reinterpret_tensor(buf219, (4, 1, 4), (4, 16, 1), 0)
del buf219
triton_poi_fused_bmm_4[grid(16)](buf235, buf237, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf238 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf236, buf237, out=buf238)
buf239 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf238, buf239, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf240 = reinterpret_tensor(buf237, (4, 4, 1), (4, 1, 16), 0)
del buf237
triton_poi_fused_bmm_6[grid(16)](buf234, buf240, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf241 = reinterpret_tensor(buf236, (4, 1, 4), (4, 16, 1), 0)
del buf236
triton_poi_fused_bmm_7[grid(16)](buf235, buf241, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf242 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf240, buf241, out=buf242)
buf243 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf242, buf243, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf244 = buf205
del buf205
triton_poi_fused_cat_8[grid(32)](buf239, buf235, buf243, buf244, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf245 = reinterpret_tensor(buf241, (4, 4, 1), (4, 1, 16), 0)
del buf241
triton_poi_fused_bmm_9[grid(16)](buf234, buf245, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf246 = reinterpret_tensor(buf240, (4, 1, 4), (4, 16, 1), 0)
del buf240
triton_poi_fused_bmm_10[grid(16)](buf235, buf246, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf247 = buf243
del buf243
extern_kernels.bmm(buf245, buf246, out=buf247)
buf248 = buf239
del buf239
triton_poi_fused__softmax_5[grid(64)](buf247, buf248, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf249 = buf215
del buf215
triton_poi_fused_cat_11[grid(48)](buf244, buf248, buf235, buf249,
48, XBLOCK=64, num_warps=1, num_stages=1)
buf250 = reinterpret_tensor(buf246, (4, 4, 1), (4, 1, 16), 0)
del buf246
triton_poi_fused_bmm_12[grid(16)](buf234, buf250, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf251 = reinterpret_tensor(buf245, (4, 1, 4), (4, 16, 1), 0)
del buf245
triton_poi_fused_bmm_13[grid(16)](buf235, buf251, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf252 = buf248
del buf248
extern_kernels.bmm(buf250, buf251, out=buf252)
buf253 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf252, buf253, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf254 = reinterpret_tensor(buf256, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_14[grid(64)](buf249, buf253, buf235, buf254,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf257 = reinterpret_tensor(buf253, (16, 4), (4, 1), 0)
del buf253
extern_kernels.mm(reinterpret_tensor(buf256, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), out=buf257)
buf258 = reinterpret_tensor(buf257, (4, 4, 4), (16, 4, 1), 0)
del buf257
buf292 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_relu_15[grid(64)](buf258, primals_12, buf232,
buf292, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf259 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf258, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 8), (1, 4), 0), out=buf259)
buf260 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf230, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf260)
buf261 = reinterpret_tensor(buf260, (4, 4, 4), (16, 4, 1), 0)
del buf260
buf477 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf261,
primals_16, buf477, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf262 = reinterpret_tensor(buf259, (4, 4, 8), (32, 8, 1), 0)
del buf259
buf478 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(128)](buf262,
primals_14, buf478, 128, XBLOCK=128, num_warps=4, num_stages=1)
buf263 = reinterpret_tensor(buf251, (4, 4, 1), (4, 1, 16), 0)
del buf251
triton_poi_fused_bmm_3[grid(16)](buf261, buf263, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf264 = reinterpret_tensor(buf250, (4, 1, 4), (4, 16, 1), 0)
del buf250
triton_poi_fused_bmm_4[grid(16)](buf262, buf264, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf265 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf263, buf264, out=buf265)
buf266 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf265, buf266, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf267 = reinterpret_tensor(buf264, (4, 4, 1), (4, 1, 16), 0)
del buf264
triton_poi_fused_bmm_6[grid(16)](buf261, buf267, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf268 = reinterpret_tensor(buf263, (4, 1, 4), (4, 16, 1), 0)
del buf263
triton_poi_fused_bmm_7[grid(16)](buf262, buf268, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf269 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf267, buf268, out=buf269)
buf270 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf269, buf270, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf271 = buf244
del buf244
triton_poi_fused_cat_8[grid(32)](buf266, buf262, buf270, buf271, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf272 = reinterpret_tensor(buf268, (4, 4, 1), (4, 1, 16), 0)
del buf268
triton_poi_fused_bmm_9[grid(16)](buf261, buf272, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf273 = reinterpret_tensor(buf267, (4, 1, 4), (4, 16, 1), 0)
del buf267
triton_poi_fused_bmm_10[grid(16)](buf262, buf273, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf274 = buf270
del buf270
extern_kernels.bmm(buf272, buf273, out=buf274)
buf275 = buf266
del buf266
triton_poi_fused__softmax_5[grid(64)](buf274, buf275, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf276 = buf249
del buf249
triton_poi_fused_cat_11[grid(48)](buf271, buf275, buf262, buf276,
48, XBLOCK=64, num_warps=1, num_stages=1)
buf277 = reinterpret_tensor(buf273, (4, 4, 1), (4, 1, 16), 0)
del buf273
triton_poi_fused_bmm_12[grid(16)](buf261, buf277, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf278 = reinterpret_tensor(buf272, (4, 1, 4), (4, 16, 1), 0)
del buf272
triton_poi_fused_bmm_13[grid(16)](buf262, buf278, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf279 = buf275
del buf275
extern_kernels.bmm(buf277, buf278, out=buf279)
buf280 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf279, buf280, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf281 = reinterpret_tensor(buf283, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_14[grid(64)](buf276, buf280, buf262, buf281,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf284 = reinterpret_tensor(buf280, (16, 4), (4, 1), 0)
del buf280
extern_kernels.mm(reinterpret_tensor(buf283, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_17, (8, 4), (1, 8), 0), out=buf284)
buf285 = reinterpret_tensor(buf278, (4, 4), (4, 1), 0)
del buf278
buf286 = buf285
del buf285
triton_poi_fused_add_div_relu_sum_16[grid(16)](buf286, buf284,
primals_18, buf230, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf287 = reinterpret_tensor(buf277, (4, 4), (4, 1), 0)
del buf277
triton_poi_fused_add_div_sum_17[grid(16)](buf258, buf232, buf287,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf288 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_20, buf286, reinterpret_tensor(
primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf288)
buf289 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_22, buf287, reinterpret_tensor(
primals_21, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf289)
buf290 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_relu_18[grid(64)](buf284, primals_18, buf230,
buf290, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf291 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf290, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf291)
buf293 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf292, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_25, (4, 12), (1, 4), 0), out=buf293)
buf294 = reinterpret_tensor(buf291, (4, 4, 12), (48, 12, 1), 0)
del buf291
buf475 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_19[grid(192)](buf294,
primals_24, buf475, 192, XBLOCK=128, num_warps=4, num_stages=1)
buf295 = reinterpret_tensor(buf293, (4, 4, 12), (48, 12, 1), 0)
del buf293
buf474 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_19[grid(192)](buf295,
primals_26, buf474, 192, XBLOCK=128, num_warps=4, num_stages=1)
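    # NOTE: iteration-2 cross-stream attention; buf288/buf289 take the role
    # of the pooled gates (compare buf174/buf175 above).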
buf296 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf297 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf308 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_20[grid(64)](buf289, buf294, buf296,
buf297, buf308, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf298 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf296, buf298, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf299 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf297, buf299, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf300 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf298, buf299, out=buf300)
buf301 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf302 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf309 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_20[grid(64)](buf288, buf295, buf301,
buf302, buf309, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf303 = reinterpret_tensor(buf299, (4, 4, 1), (4, 1, 16), 0)
del buf299
triton_poi_fused_bmm_3[grid(16)](buf301, buf303, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf304 = reinterpret_tensor(buf298, (4, 1, 4), (4, 16, 1), 0)
del buf298
triton_poi_fused_bmm_3[grid(16)](buf302, buf304, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf305 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf303, buf304, out=buf305)
buf306 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf300, buf306, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf307 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf305, buf307, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf310 = reinterpret_tensor(buf304, (4, 4, 1), (4, 1, 16), 0)
del buf304
triton_poi_fused_bmm_6[grid(16)](buf296, buf310, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf311 = reinterpret_tensor(buf303, (4, 1, 4), (4, 16, 1), 0)
del buf303
triton_poi_fused_bmm_6[grid(16)](buf297, buf311, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf312 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf310, buf311, out=buf312)
buf313 = reinterpret_tensor(buf311, (4, 4, 1), (4, 1, 16), 0)
del buf311
triton_poi_fused_bmm_6[grid(16)](buf301, buf313, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf314 = reinterpret_tensor(buf310, (4, 1, 4), (4, 16, 1), 0)
del buf310
triton_poi_fused_bmm_6[grid(16)](buf302, buf314, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf315 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf313, buf314, out=buf315)
buf316 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf312, buf316, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf317 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf315, buf317, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf318 = buf271
del buf271
triton_poi_fused_cat_21[grid(32)](buf306, buf308, buf316, buf318,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf319 = buf204
del buf204
triton_poi_fused_cat_21[grid(32)](buf307, buf309, buf317, buf319,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf320 = reinterpret_tensor(buf314, (4, 4, 1), (4, 1, 16), 0)
del buf314
triton_poi_fused_bmm_9[grid(16)](buf296, buf320, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf321 = reinterpret_tensor(buf313, (4, 1, 4), (4, 16, 1), 0)
del buf313
triton_poi_fused_bmm_9[grid(16)](buf297, buf321, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf322 = buf317
del buf317
extern_kernels.bmm(buf320, buf321, out=buf322)
buf323 = reinterpret_tensor(buf321, (4, 4, 1), (4, 1, 16), 0)
del buf321
triton_poi_fused_bmm_9[grid(16)](buf301, buf323, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf324 = reinterpret_tensor(buf320, (4, 1, 4), (4, 16, 1), 0)
del buf320
triton_poi_fused_bmm_9[grid(16)](buf302, buf324, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf325 = buf307
del buf307
extern_kernels.bmm(buf323, buf324, out=buf325)
buf326 = buf316
del buf316
triton_poi_fused__softmax_5[grid(64)](buf322, buf326, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf327 = buf306
del buf306
triton_poi_fused__softmax_5[grid(64)](buf325, buf327, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf328 = buf276
del buf276
triton_poi_fused_cat_22[grid(48)](buf318, buf326, buf308, buf328,
48, XBLOCK=64, num_warps=1, num_stages=1)
buf329 = buf214
del buf214
triton_poi_fused_cat_22[grid(48)](buf319, buf327, buf309, buf329,
48, XBLOCK=64, num_warps=1, num_stages=1)
buf330 = reinterpret_tensor(buf324, (4, 4, 1), (4, 1, 16), 0)
del buf324
triton_poi_fused_bmm_12[grid(16)](buf296, buf330, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf331 = reinterpret_tensor(buf323, (4, 1, 4), (4, 16, 1), 0)
del buf323
triton_poi_fused_bmm_12[grid(16)](buf297, buf331, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf332 = buf327
del buf327
extern_kernels.bmm(buf330, buf331, out=buf332)
buf333 = reinterpret_tensor(buf331, (4, 4, 1), (4, 1, 16), 0)
del buf331
triton_poi_fused_bmm_12[grid(16)](buf301, buf333, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf334 = reinterpret_tensor(buf330, (4, 1, 4), (4, 16, 1), 0)
del buf330
triton_poi_fused_bmm_12[grid(16)](buf302, buf334, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf335 = buf326
del buf326
extern_kernels.bmm(buf333, buf334, out=buf335)
buf336 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf332, buf336, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf337 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf335, buf337, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf338 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf340 = buf338
del buf338
triton_poi_fused_add_cat_23[grid(64)](buf340, buf328, buf336,
buf308, buf290, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf339 = buf336
del buf336
buf342 = buf339
del buf339
triton_poi_fused_add_cat_23[grid(64)](buf342, buf329, buf337,
buf309, buf292, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf341 = reinterpret_tensor(buf337, (16, 4), (4, 1), 0)
del buf337
extern_kernels.mm(reinterpret_tensor(buf340, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf341)
buf343 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf342, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_29, (4, 4), (1, 4), 0), out=buf343)
buf344 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf397 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf396 = reinterpret_tensor(buf397, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_add_cat_relu_24[grid(64)](buf341, primals_28,
buf284, primals_18, buf230, buf344, buf396, 64, XBLOCK=64,
num_warps=1, num_stages=1)
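    # NOTE: third and final iteration. From here each primals_* weight is
    # freed with `del` at its last use, while the saved ReLU masks and
    # reinterpreted activations are kept for the return tuple.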
buf345 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf344, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 8), (1, 4), 0), out=buf345)
buf346 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf370 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf369 = reinterpret_tensor(buf370, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_add_cat_relu_25[grid(64)](buf343, primals_30,
buf258, buf232, buf346, buf369, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf347 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf346, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf347)
buf348 = reinterpret_tensor(buf347, (4, 4, 4), (16, 4, 1), 0)
del buf347
buf470 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf348,
primals_10, buf470, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
buf349 = reinterpret_tensor(buf345, (4, 4, 8), (32, 8, 1), 0)
del buf345
buf471 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(128)](buf349,
primals_8, buf471, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_8
buf350 = reinterpret_tensor(buf334, (4, 4, 1), (4, 1, 16), 0)
del buf334
triton_poi_fused_bmm_3[grid(16)](buf348, buf350, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf351 = reinterpret_tensor(buf333, (4, 1, 4), (4, 16, 1), 0)
del buf333
triton_poi_fused_bmm_4[grid(16)](buf349, buf351, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf352 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf350, buf351, out=buf352)
buf353 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf352, buf353, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf354 = reinterpret_tensor(buf351, (4, 4, 1), (4, 1, 16), 0)
del buf351
triton_poi_fused_bmm_6[grid(16)](buf348, buf354, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf355 = reinterpret_tensor(buf350, (4, 1, 4), (4, 16, 1), 0)
del buf350
triton_poi_fused_bmm_7[grid(16)](buf349, buf355, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf356 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf354, buf355, out=buf356)
buf357 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf356, buf357, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf358 = buf319
del buf319
triton_poi_fused_cat_8[grid(32)](buf353, buf349, buf357, buf358, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf359 = reinterpret_tensor(buf355, (4, 4, 1), (4, 1, 16), 0)
del buf355
triton_poi_fused_bmm_9[grid(16)](buf348, buf359, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf360 = reinterpret_tensor(buf354, (4, 1, 4), (4, 16, 1), 0)
del buf354
triton_poi_fused_bmm_10[grid(16)](buf349, buf360, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf361 = buf357
del buf357
extern_kernels.bmm(buf359, buf360, out=buf361)
buf362 = buf353
del buf353
triton_poi_fused__softmax_5[grid(64)](buf361, buf362, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf363 = buf329
del buf329
triton_poi_fused_cat_11[grid(48)](buf358, buf362, buf349, buf363,
48, XBLOCK=64, num_warps=1, num_stages=1)
buf364 = reinterpret_tensor(buf360, (4, 4, 1), (4, 1, 16), 0)
del buf360
triton_poi_fused_bmm_12[grid(16)](buf348, buf364, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf365 = reinterpret_tensor(buf359, (4, 1, 4), (4, 16, 1), 0)
del buf359
triton_poi_fused_bmm_13[grid(16)](buf349, buf365, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf366 = buf362
del buf362
extern_kernels.bmm(buf364, buf365, out=buf366)
buf367 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf366, buf367, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf368 = reinterpret_tensor(buf370, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_14[grid(64)](buf363, buf367, buf349, buf368,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf371 = reinterpret_tensor(buf367, (16, 4), (4, 1), 0)
del buf367
extern_kernels.mm(reinterpret_tensor(buf370, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), out=buf371)
buf372 = reinterpret_tensor(buf371, (4, 4, 4), (16, 4, 1), 0)
del buf371
buf406 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_relu_15[grid(64)](buf372, primals_12, buf346,
buf406, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
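    # NOTE: buf406 is this iteration's saved residual activation; it is
    # reused below by primals_25 and by the final add_cat_23 that produces
    # buf456.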
buf373 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf372, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 8), (1, 4), 0), out=buf373)
buf374 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf344, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf374)
buf375 = reinterpret_tensor(buf374, (4, 4, 4), (16, 4, 1), 0)
del buf374
buf467 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf375,
primals_16, buf467, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_16
buf376 = reinterpret_tensor(buf373, (4, 4, 8), (32, 8, 1), 0)
del buf373
buf468 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(128)](buf376,
primals_14, buf468, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_14
buf377 = reinterpret_tensor(buf365, (4, 4, 1), (4, 1, 16), 0)
del buf365
triton_poi_fused_bmm_3[grid(16)](buf375, buf377, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf378 = reinterpret_tensor(buf364, (4, 1, 4), (4, 16, 1), 0)
del buf364
triton_poi_fused_bmm_4[grid(16)](buf376, buf378, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf379 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf377, buf378, out=buf379)
buf380 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf379, buf380, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf381 = reinterpret_tensor(buf378, (4, 4, 1), (4, 1, 16), 0)
del buf378
triton_poi_fused_bmm_6[grid(16)](buf375, buf381, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf382 = reinterpret_tensor(buf377, (4, 1, 4), (4, 16, 1), 0)
del buf377
triton_poi_fused_bmm_7[grid(16)](buf376, buf382, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf383 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf381, buf382, out=buf383)
buf384 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf383, buf384, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf385 = buf358
del buf358
triton_poi_fused_cat_8[grid(32)](buf380, buf376, buf384, buf385, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf386 = reinterpret_tensor(buf382, (4, 4, 1), (4, 1, 16), 0)
del buf382
triton_poi_fused_bmm_9[grid(16)](buf375, buf386, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf387 = reinterpret_tensor(buf381, (4, 1, 4), (4, 16, 1), 0)
del buf381
triton_poi_fused_bmm_10[grid(16)](buf376, buf387, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf388 = buf384
del buf384
extern_kernels.bmm(buf386, buf387, out=buf388)
buf389 = buf380
del buf380
triton_poi_fused__softmax_5[grid(64)](buf388, buf389, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf390 = buf363
del buf363
triton_poi_fused_cat_11[grid(48)](buf385, buf389, buf376, buf390,
48, XBLOCK=64, num_warps=1, num_stages=1)
buf391 = reinterpret_tensor(buf387, (4, 4, 1), (4, 1, 16), 0)
del buf387
triton_poi_fused_bmm_12[grid(16)](buf375, buf391, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf392 = reinterpret_tensor(buf386, (4, 1, 4), (4, 16, 1), 0)
del buf386
triton_poi_fused_bmm_13[grid(16)](buf376, buf392, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf393 = buf389
del buf389
extern_kernels.bmm(buf391, buf392, out=buf393)
buf394 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf393, buf394, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf395 = reinterpret_tensor(buf397, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_14[grid(64)](buf390, buf394, buf376, buf395,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf398 = reinterpret_tensor(buf394, (16, 4), (4, 1), 0)
del buf394
extern_kernels.mm(reinterpret_tensor(buf397, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_17, (8, 4), (1, 8), 0), out=buf398)
buf399 = reinterpret_tensor(buf392, (4, 4), (4, 1), 0)
del buf392
buf400 = buf399
del buf399
triton_poi_fused_add_div_relu_sum_16[grid(16)](buf400, buf398,
primals_18, buf344, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf401 = reinterpret_tensor(buf391, (4, 4), (4, 1), 0)
del buf391
triton_poi_fused_add_div_sum_17[grid(16)](buf372, buf346, buf401,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf402 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_20, buf400, reinterpret_tensor(
primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf402)
del primals_20
buf403 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_22, buf401, reinterpret_tensor(
primals_21, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf403)
del primals_22
buf404 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_relu_18[grid(64)](buf398, primals_18, buf344,
buf404, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf405 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf404, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf405)
buf407 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf406, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_25, (4, 12), (1, 4), 0), out=buf407)
buf408 = reinterpret_tensor(buf405, (4, 4, 12), (48, 12, 1), 0)
del buf405
buf465 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_19[grid(192)](buf408,
primals_24, buf465, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_24
buf409 = reinterpret_tensor(buf407, (4, 4, 12), (48, 12, 1), 0)
del buf407
buf464 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_19[grid(192)](buf409,
primals_26, buf464, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_26
buf410 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf411 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf422 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_20[grid(64)](buf403, buf408, buf410,
buf411, buf422, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf412 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf410, buf412, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf413 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf411, buf413, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf414 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf412, buf413, out=buf414)
buf415 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf416 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf423 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_20[grid(64)](buf402, buf409, buf415,
buf416, buf423, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf417 = reinterpret_tensor(buf413, (4, 4, 1), (4, 1, 16), 0)
del buf413
triton_poi_fused_bmm_3[grid(16)](buf415, buf417, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf418 = reinterpret_tensor(buf412, (4, 1, 4), (4, 16, 1), 0)
del buf412
triton_poi_fused_bmm_3[grid(16)](buf416, buf418, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf419 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf417, buf418, out=buf419)
buf420 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf414, buf420, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf421 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf419, buf421, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf424 = reinterpret_tensor(buf418, (4, 4, 1), (4, 1, 16), 0)
del buf418
triton_poi_fused_bmm_6[grid(16)](buf410, buf424, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf425 = reinterpret_tensor(buf417, (4, 1, 4), (4, 16, 1), 0)
del buf417
triton_poi_fused_bmm_6[grid(16)](buf411, buf425, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf426 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf424, buf425, out=buf426)
buf427 = reinterpret_tensor(buf425, (4, 4, 1), (4, 1, 16), 0)
del buf425
triton_poi_fused_bmm_6[grid(16)](buf415, buf427, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf428 = reinterpret_tensor(buf424, (4, 1, 4), (4, 16, 1), 0)
del buf424
triton_poi_fused_bmm_6[grid(16)](buf416, buf428, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf429 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf427, buf428, out=buf429)
buf430 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf426, buf430, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf431 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf429, buf431, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf432 = buf385
del buf385
triton_poi_fused_cat_21[grid(32)](buf420, buf422, buf430, buf432,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf433 = buf318
del buf318
triton_poi_fused_cat_21[grid(32)](buf421, buf423, buf431, buf433,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf434 = reinterpret_tensor(buf428, (4, 4, 1), (4, 1, 16), 0)
del buf428
triton_poi_fused_bmm_9[grid(16)](buf410, buf434, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf435 = reinterpret_tensor(buf427, (4, 1, 4), (4, 16, 1), 0)
del buf427
triton_poi_fused_bmm_9[grid(16)](buf411, buf435, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf436 = buf431
del buf431
extern_kernels.bmm(buf434, buf435, out=buf436)
buf437 = reinterpret_tensor(buf435, (4, 4, 1), (4, 1, 16), 0)
del buf435
triton_poi_fused_bmm_9[grid(16)](buf415, buf437, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf438 = reinterpret_tensor(buf434, (4, 1, 4), (4, 16, 1), 0)
del buf434
triton_poi_fused_bmm_9[grid(16)](buf416, buf438, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf439 = buf421
del buf421
extern_kernels.bmm(buf437, buf438, out=buf439)
buf440 = buf430
del buf430
triton_poi_fused__softmax_5[grid(64)](buf436, buf440, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf441 = buf420
del buf420
triton_poi_fused__softmax_5[grid(64)](buf439, buf441, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf442 = buf390
del buf390
triton_poi_fused_cat_22[grid(48)](buf432, buf440, buf422, buf442,
48, XBLOCK=64, num_warps=1, num_stages=1)
del buf432
buf443 = buf328
del buf328
triton_poi_fused_cat_22[grid(48)](buf433, buf441, buf423, buf443,
48, XBLOCK=64, num_warps=1, num_stages=1)
del buf433
buf444 = reinterpret_tensor(buf438, (4, 4, 1), (4, 1, 16), 0)
del buf438
triton_poi_fused_bmm_12[grid(16)](buf410, buf444, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf445 = reinterpret_tensor(buf437, (4, 1, 4), (4, 16, 1), 0)
del buf437
triton_poi_fused_bmm_12[grid(16)](buf411, buf445, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf446 = buf441
del buf441
extern_kernels.bmm(buf444, buf445, out=buf446)
buf447 = reinterpret_tensor(buf445, (4, 4, 1), (4, 1, 16), 0)
del buf445
triton_poi_fused_bmm_12[grid(16)](buf415, buf447, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf448 = reinterpret_tensor(buf444, (4, 1, 4), (4, 16, 1), 0)
del buf444
triton_poi_fused_bmm_12[grid(16)](buf416, buf448, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf449 = buf440
del buf440
extern_kernels.bmm(buf447, buf448, out=buf449)
del buf447
del buf448
buf450 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf446, buf450, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf451 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf449, buf451, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf452 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf454 = buf452
del buf452
triton_poi_fused_add_cat_23[grid(64)](buf454, buf442, buf450,
buf422, buf404, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf442
buf453 = buf450
del buf450
buf456 = buf453
del buf453
triton_poi_fused_add_cat_23[grid(64)](buf456, buf443, buf451,
buf423, buf406, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf443
buf455 = reinterpret_tensor(buf451, (16, 4), (4, 1), 0)
del buf451
extern_kernels.mm(reinterpret_tensor(buf454, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf455)
buf457 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf456, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_29, (4, 4), (1, 4), 0), out=buf457)
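    # NOTE: the two fused add_relu_threshold_backward kernels below appear to
    # sum the residual contributions from all three iterations into the
    # stream outputs buf459 and buf461, while emitting one boolean ReLU mask
    # per iteration (buf462..buf503) for autograd.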
buf458 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf459 = buf458
del buf458
buf463 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf466 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf473 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf476 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf483 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf486 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf493 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf496 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf503 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_26[grid(64)](buf459,
buf2, buf56, primals_18, buf113, primals_28, buf170, buf227,
buf284, buf341, buf398, buf455, buf463, buf466, buf473, buf476,
buf483, buf486, buf493, buf496, buf503, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf113
del buf170
del buf227
del buf284
del buf341
del buf398
del buf455
del primals_18
del primals_28
buf460 = reinterpret_tensor(buf56, (4, 4, 4), (16, 4, 1), 0)
del buf56
buf461 = buf460
del buf460
buf462 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf472 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf482 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf492 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf469 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf479 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf489 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf499 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf502 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_27[grid(64)](buf461,
buf4, buf30, buf115, primals_30, buf144, buf229, buf258, buf343,
buf372, buf457, buf462, buf472, buf482, buf492, buf469, buf479,
buf489, buf499, buf502, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf115
del buf229
del buf343
del buf457
del primals_30
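    # The outputs come first (buf459, buf461); everything after them is
    # saved for backward: reinterpreted weight/activation views, per-head
    # attention scores, and the boolean ReLU masks.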
return (buf459, buf461, reinterpret_tensor(primals_3, (16, 4), (4, 1),
0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(
buf4, (16, 4), (4, 1), 0), buf10, reinterpret_tensor(buf7, (4, 1, 4,
1), (32, 32, 8, 1), 4), buf14, reinterpret_tensor(buf7, (4, 1, 4, 1
), (32, 32, 8, 1), 5), buf19, reinterpret_tensor(buf7, (4, 1, 4, 1),
(32, 32, 8, 1), 6), buf24, reinterpret_tensor(buf7, (4, 1, 4, 1), (
32, 32, 8, 1), 7), reinterpret_tensor(buf28, (16, 8), (8, 1), 0),
reinterpret_tensor(buf30, (16, 4), (4, 1), 0), buf37,
reinterpret_tensor(buf34, (4, 1, 4, 1), (32, 32, 8, 1), 4), buf41,
reinterpret_tensor(buf34, (4, 1, 4, 1), (32, 32, 8, 1), 5), buf46,
reinterpret_tensor(buf34, (4, 1, 4, 1), (32, 32, 8, 1), 6), buf51,
reinterpret_tensor(buf34, (4, 1, 4, 1), (32, 32, 8, 1), 7),
reinterpret_tensor(buf55, (16, 8), (8, 1), 0), buf58, buf59, buf60,
buf61, reinterpret_tensor(buf62, (16, 4), (4, 1), 0),
reinterpret_tensor(buf64, (16, 4), (4, 1), 0), reinterpret_tensor(
buf66, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf66, (4, 4,
4), (48, 12, 1), 4), reinterpret_tensor(buf66, (4, 4, 4), (48, 12,
1), 8), reinterpret_tensor(buf67, (4, 4, 4), (48, 12, 1), 0),
reinterpret_tensor(buf67, (4, 4, 4), (48, 12, 1), 4),
reinterpret_tensor(buf67, (4, 4, 4), (48, 12, 1), 8), buf72, buf77,
reinterpret_tensor(buf80, (4, 1, 4, 1), (16, 16, 4, 1), 0),
reinterpret_tensor(buf81, (4, 1, 4, 1), (16, 16, 4, 1), 0), buf84,
buf87, reinterpret_tensor(buf80, (4, 1, 4, 1), (16, 16, 4, 1), 1),
reinterpret_tensor(buf81, (4, 1, 4, 1), (16, 16, 4, 1), 1), buf94,
buf97, reinterpret_tensor(buf80, (4, 1, 4, 1), (16, 16, 4, 1), 2),
reinterpret_tensor(buf81, (4, 1, 4, 1), (16, 16, 4, 1), 2), buf104,
buf107, reinterpret_tensor(buf80, (4, 1, 4, 1), (16, 16, 4, 1), 3),
reinterpret_tensor(buf81, (4, 1, 4, 1), (16, 16, 4, 1), 3),
reinterpret_tensor(buf112, (16, 4), (4, 1), 0), reinterpret_tensor(
buf114, (16, 4), (4, 1), 0), reinterpret_tensor(buf116, (16, 4), (4,
1), 0), reinterpret_tensor(buf118, (16, 4), (4, 1), 0), buf124,
reinterpret_tensor(buf121, (4, 1, 4, 1), (32, 32, 8, 1), 4), buf128,
reinterpret_tensor(buf121, (4, 1, 4, 1), (32, 32, 8, 1), 5), buf133,
reinterpret_tensor(buf121, (4, 1, 4, 1), (32, 32, 8, 1), 6), buf138,
reinterpret_tensor(buf121, (4, 1, 4, 1), (32, 32, 8, 1), 7),
reinterpret_tensor(buf142, (16, 8), (8, 1), 0), reinterpret_tensor(
buf144, (16, 4), (4, 1), 0), buf151, reinterpret_tensor(buf148, (4,
1, 4, 1), (32, 32, 8, 1), 4), buf155, reinterpret_tensor(buf148, (4,
1, 4, 1), (32, 32, 8, 1), 5), buf160, reinterpret_tensor(buf148, (4,
1, 4, 1), (32, 32, 8, 1), 6), buf165, reinterpret_tensor(buf148, (4,
1, 4, 1), (32, 32, 8, 1), 7), reinterpret_tensor(buf169, (16, 8), (
8, 1), 0), buf172, buf173, buf174, buf175, reinterpret_tensor(
buf176, (16, 4), (4, 1), 0), reinterpret_tensor(buf178, (16, 4), (4,
1), 0), reinterpret_tensor(buf180, (4, 4, 4), (48, 12, 1), 0),
reinterpret_tensor(buf180, (4, 4, 4), (48, 12, 1), 4),
reinterpret_tensor(buf180, (4, 4, 4), (48, 12, 1), 8),
reinterpret_tensor(buf181, (4, 4, 4), (48, 12, 1), 0),
reinterpret_tensor(buf181, (4, 4, 4), (48, 12, 1), 4),
reinterpret_tensor(buf181, (4, 4, 4), (48, 12, 1), 8), buf186,
buf191, reinterpret_tensor(buf194, (4, 1, 4, 1), (16, 16, 4, 1), 0),
reinterpret_tensor(buf195, (4, 1, 4, 1), (16, 16, 4, 1), 0), buf198,
buf201, reinterpret_tensor(buf194, (4, 1, 4, 1), (16, 16, 4, 1), 1),
reinterpret_tensor(buf195, (4, 1, 4, 1), (16, 16, 4, 1), 1), buf208,
buf211, reinterpret_tensor(buf194, (4, 1, 4, 1), (16, 16, 4, 1), 2),
reinterpret_tensor(buf195, (4, 1, 4, 1), (16, 16, 4, 1), 2), buf218,
buf221, reinterpret_tensor(buf194, (4, 1, 4, 1), (16, 16, 4, 1), 3),
reinterpret_tensor(buf195, (4, 1, 4, 1), (16, 16, 4, 1), 3),
reinterpret_tensor(buf226, (16, 4), (4, 1), 0), reinterpret_tensor(
buf228, (16, 4), (4, 1), 0), reinterpret_tensor(buf230, (16, 4), (4,
1), 0), reinterpret_tensor(buf232, (16, 4), (4, 1), 0), buf238,
reinterpret_tensor(buf235, (4, 1, 4, 1), (32, 32, 8, 1), 4), buf242,
reinterpret_tensor(buf235, (4, 1, 4, 1), (32, 32, 8, 1), 5), buf247,
reinterpret_tensor(buf235, (4, 1, 4, 1), (32, 32, 8, 1), 6), buf252,
reinterpret_tensor(buf235, (4, 1, 4, 1), (32, 32, 8, 1), 7),
reinterpret_tensor(buf256, (16, 8), (8, 1), 0), reinterpret_tensor(
buf258, (16, 4), (4, 1), 0), buf265, reinterpret_tensor(buf262, (4,
1, 4, 1), (32, 32, 8, 1), 4), buf269, reinterpret_tensor(buf262, (4,
1, 4, 1), (32, 32, 8, 1), 5), buf274, reinterpret_tensor(buf262, (4,
1, 4, 1), (32, 32, 8, 1), 6), buf279, reinterpret_tensor(buf262, (4,
1, 4, 1), (32, 32, 8, 1), 7), reinterpret_tensor(buf283, (16, 8), (
8, 1), 0), buf286, buf287, buf288, buf289, reinterpret_tensor(
buf290, (16, 4), (4, 1), 0), reinterpret_tensor(buf292, (16, 4), (4,
1), 0), reinterpret_tensor(buf294, (4, 4, 4), (48, 12, 1), 0),
reinterpret_tensor(buf294, (4, 4, 4), (48, 12, 1), 4),
reinterpret_tensor(buf294, (4, 4, 4), (48, 12, 1), 8),
reinterpret_tensor(buf295, (4, 4, 4), (48, 12, 1), 0),
reinterpret_tensor(buf295, (4, 4, 4), (48, 12, 1), 4),
reinterpret_tensor(buf295, (4, 4, 4), (48, 12, 1), 8), buf300,
buf305, reinterpret_tensor(buf308, (4, 1, 4, 1), (16, 16, 4, 1), 0),
reinterpret_tensor(buf309, (4, 1, 4, 1), (16, 16, 4, 1), 0), buf312,
buf315, reinterpret_tensor(buf308, (4, 1, 4, 1), (16, 16, 4, 1), 1),
reinterpret_tensor(buf309, (4, 1, 4, 1), (16, 16, 4, 1), 1), buf322,
buf325, reinterpret_tensor(buf308, (4, 1, 4, 1), (16, 16, 4, 1), 2),
reinterpret_tensor(buf309, (4, 1, 4, 1), (16, 16, 4, 1), 2), buf332,
buf335, reinterpret_tensor(buf308, (4, 1, 4, 1), (16, 16, 4, 1), 3),
reinterpret_tensor(buf309, (4, 1, 4, 1), (16, 16, 4, 1), 3),
reinterpret_tensor(buf340, (16, 4), (4, 1), 0), reinterpret_tensor(
buf342, (16, 4), (4, 1), 0), reinterpret_tensor(buf344, (16, 4), (4,
1), 0), reinterpret_tensor(buf346, (16, 4), (4, 1), 0), buf352,
reinterpret_tensor(buf349, (4, 1, 4, 1), (32, 32, 8, 1), 4), buf356,
reinterpret_tensor(buf349, (4, 1, 4, 1), (32, 32, 8, 1), 5), buf361,
reinterpret_tensor(buf349, (4, 1, 4, 1), (32, 32, 8, 1), 6), buf366,
reinterpret_tensor(buf349, (4, 1, 4, 1), (32, 32, 8, 1), 7),
reinterpret_tensor(buf370, (16, 8), (8, 1), 0), reinterpret_tensor(
buf372, (16, 4), (4, 1), 0), buf379, reinterpret_tensor(buf376, (4,
1, 4, 1), (32, 32, 8, 1), 4), buf383, reinterpret_tensor(buf376, (4,
1, 4, 1), (32, 32, 8, 1), 5), buf388, reinterpret_tensor(buf376, (4,
1, 4, 1), (32, 32, 8, 1), 6), buf393, reinterpret_tensor(buf376, (4,
1, 4, 1), (32, 32, 8, 1), 7), reinterpret_tensor(buf397, (16, 8), (
8, 1), 0), buf400, buf401, buf402, buf403, reinterpret_tensor(
buf404, (16, 4), (4, 1), 0), reinterpret_tensor(buf406, (16, 4), (4,
1), 0), reinterpret_tensor(buf408, (4, 4, 4), (48, 12, 1), 0),
reinterpret_tensor(buf408, (4, 4, 4), (48, 12, 1), 4),
reinterpret_tensor(buf408, (4, 4, 4), (48, 12, 1), 8),
reinterpret_tensor(buf409, (4, 4, 4), (48, 12, 1), 0),
reinterpret_tensor(buf409, (4, 4, 4), (48, 12, 1), 4),
reinterpret_tensor(buf409, (4, 4, 4), (48, 12, 1), 8), buf414,
buf419, reinterpret_tensor(buf422, (4, 1, 4, 1), (16, 16, 4, 1), 0),
reinterpret_tensor(buf423, (4, 1, 4, 1), (16, 16, 4, 1), 0), buf426,
buf429, reinterpret_tensor(buf422, (4, 1, 4, 1), (16, 16, 4, 1), 1),
reinterpret_tensor(buf423, (4, 1, 4, 1), (16, 16, 4, 1), 1), buf436,
buf439, reinterpret_tensor(buf422, (4, 1, 4, 1), (16, 16, 4, 1), 2),
reinterpret_tensor(buf423, (4, 1, 4, 1), (16, 16, 4, 1), 2), buf446,
buf449, reinterpret_tensor(buf422, (4, 1, 4, 1), (16, 16, 4, 1), 3),
reinterpret_tensor(buf423, (4, 1, 4, 1), (16, 16, 4, 1), 3),
reinterpret_tensor(buf454, (16, 4), (4, 1), 0), reinterpret_tensor(
buf456, (16, 4), (4, 1), 0), buf462, primals_29, buf463, primals_27,
reinterpret_tensor(buf415, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf416, (4, 4, 1), (16, 4, 1), 3),
reinterpret_tensor(buf410, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf411, (4, 4, 1), (16, 4, 1), 3),
reinterpret_tensor(buf415, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf416, (4, 4, 1), (16, 4, 1), 2),
reinterpret_tensor(buf410, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf411, (4, 4, 1), (16, 4, 1), 2),
reinterpret_tensor(buf415, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf416, (4, 4, 1), (16, 4, 1), 1),
reinterpret_tensor(buf410, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf411, (4, 4, 1), (16, 4, 1), 1),
reinterpret_tensor(buf415, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf416, (4, 4, 1), (16, 4, 1), 0),
reinterpret_tensor(buf410, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf411, (4, 4, 1), (16, 4, 1), 0), buf464,
primals_25, buf465, primals_23, primals_21, primals_19, buf466,
primals_17, reinterpret_tensor(buf375, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf376, (4, 4, 1), (32, 8, 1), 3),
reinterpret_tensor(buf375, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf376, (4, 4, 1), (32, 8, 1), 2),
reinterpret_tensor(buf375, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf376, (4, 4, 1), (32, 8, 1), 1),
reinterpret_tensor(buf375, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf376, (4, 4, 1), (32, 8, 1), 0), buf467,
primals_15, buf468, primals_13, buf469, primals_11,
reinterpret_tensor(buf348, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf349, (4, 4, 1), (32, 8, 1), 3),
reinterpret_tensor(buf348, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf349, (4, 4, 1), (32, 8, 1), 2),
reinterpret_tensor(buf348, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf349, (4, 4, 1), (32, 8, 1), 1),
reinterpret_tensor(buf348, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf349, (4, 4, 1), (32, 8, 1), 0), buf470,
primals_9, buf471, primals_7, buf472, buf473, reinterpret_tensor(
buf301, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf302, (4, 4,
1), (16, 4, 1), 3), reinterpret_tensor(buf296, (4, 1, 4), (16, 1, 4
), 3), reinterpret_tensor(buf297, (4, 4, 1), (16, 4, 1), 3),
reinterpret_tensor(buf301, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf302, (4, 4, 1), (16, 4, 1), 2),
reinterpret_tensor(buf296, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf297, (4, 4, 1), (16, 4, 1), 2),
reinterpret_tensor(buf301, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf302, (4, 4, 1), (16, 4, 1), 1),
reinterpret_tensor(buf296, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf297, (4, 4, 1), (16, 4, 1), 1),
reinterpret_tensor(buf301, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf302, (4, 4, 1), (16, 4, 1), 0),
reinterpret_tensor(buf296, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf297, (4, 4, 1), (16, 4, 1), 0), buf474,
buf475, buf476, reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf262, (4, 4, 1), (32, 8, 1), 3),
reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf262, (4, 4, 1), (32, 8, 1), 2),
reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf262, (4, 4, 1), (32, 8, 1), 1),
reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf262, (4, 4, 1), (32, 8, 1), 0), buf477,
buf478, buf479, reinterpret_tensor(buf234, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf235, (4, 4, 1), (32, 8, 1), 3),
reinterpret_tensor(buf234, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf235, (4, 4, 1), (32, 8, 1), 2),
reinterpret_tensor(buf234, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf235, (4, 4, 1), (32, 8, 1), 1),
reinterpret_tensor(buf234, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf235, (4, 4, 1), (32, 8, 1), 0), buf480,
buf481, buf482, buf483, reinterpret_tensor(buf187, (4, 1, 4), (16,
1, 4), 3), reinterpret_tensor(buf188, (4, 4, 1), (16, 4, 1), 3),
reinterpret_tensor(buf182, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf183, (4, 4, 1), (16, 4, 1), 3),
reinterpret_tensor(buf187, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf188, (4, 4, 1), (16, 4, 1), 2),
reinterpret_tensor(buf182, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf183, (4, 4, 1), (16, 4, 1), 2),
reinterpret_tensor(buf187, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf188, (4, 4, 1), (16, 4, 1), 1),
reinterpret_tensor(buf182, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf183, (4, 4, 1), (16, 4, 1), 1),
reinterpret_tensor(buf187, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf188, (4, 4, 1), (16, 4, 1), 0),
reinterpret_tensor(buf182, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf183, (4, 4, 1), (16, 4, 1), 0), buf484,
buf485, buf486, reinterpret_tensor(buf147, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf148, (4, 4, 1), (32, 8, 1), 3),
reinterpret_tensor(buf147, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf148, (4, 4, 1), (32, 8, 1), 2),
reinterpret_tensor(buf147, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf148, (4, 4, 1), (32, 8, 1), 1),
reinterpret_tensor(buf147, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf148, (4, 4, 1), (32, 8, 1), 0), buf487,
buf488, buf489, reinterpret_tensor(buf120, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf121, (4, 4, 1), (32, 8, 1), 3),
reinterpret_tensor(buf120, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf121, (4, 4, 1), (32, 8, 1), 2),
reinterpret_tensor(buf120, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf121, (4, 4, 1), (32, 8, 1), 1),
reinterpret_tensor(buf120, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf121, (4, 4, 1), (32, 8, 1), 0), buf490,
buf491, buf492, buf493, reinterpret_tensor(buf73, (4, 1, 4), (16, 1,
4), 3), reinterpret_tensor(buf74, (4, 4, 1), (16, 4, 1), 3),
reinterpret_tensor(buf68, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf69, (4, 4, 1), (16, 4, 1), 3),
reinterpret_tensor(buf73, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf74, (4, 4, 1), (16, 4, 1), 2),
reinterpret_tensor(buf68, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf69, (4, 4, 1), (16, 4, 1), 2),
reinterpret_tensor(buf73, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf74, (4, 4, 1), (16, 4, 1), 1),
reinterpret_tensor(buf68, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf69, (4, 4, 1), (16, 4, 1), 1),
reinterpret_tensor(buf73, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf74, (4, 4, 1), (16, 4, 1), 0),
reinterpret_tensor(buf68, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf69, (4, 4, 1), (16, 4, 1), 0), buf494, buf495,
buf496, reinterpret_tensor(buf33, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf34, (4, 4, 1), (32, 8, 1), 3),
reinterpret_tensor(buf33, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf34, (4, 4, 1), (32, 8, 1), 2),
reinterpret_tensor(buf33, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf34, (4, 4, 1), (32, 8, 1), 1),
reinterpret_tensor(buf33, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf34, (4, 4, 1), (32, 8, 1), 0), buf497, buf498,
buf499, reinterpret_tensor(buf6, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf7, (4, 4, 1), (32, 8, 1), 3),
reinterpret_tensor(buf6, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf7, (4, 4, 1), (32, 8, 1), 2),
reinterpret_tensor(buf6, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf7, (4, 4, 1), (32, 8, 1), 1),
reinterpret_tensor(buf6, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf7, (4, 4, 1), (32, 8, 1), 0), buf500, buf501,
buf502, buf503)
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
        self.activate = activate.lower() if activate is not None else None
        if self.activate == 'relu':
            self.ac_fn = nn.ReLU()
        elif self.activate == 'sigmoid':
            self.ac_fn = nn.Sigmoid()
        elif self.activate == 'tanh':
            self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
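# Hedged usage sketch (not part of the original source): FCNet is a thin
# Linear wrapper with optional dropout and activation; the sizes below are
# illustrative assumptions only.
def _example_fcnet():
    fc = FCNet(in_size=4, out_size=8, activate='relu', drop=0.1)
    x = torch.rand(2, 4)
    return fc(x)  # -> [2, 8], ReLU-activated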
class OneSideInterModalityUpdate(nn.Module):
"""
one-side Inter-Modality Attention Flow
    according to the paper, instead of parallel V->Q & Q->V, we first do V->Q and then Q->V
"""
def __init__(self, src_size, tgt_size, output_size, num_head, drop=0.0):
super(OneSideInterModalityUpdate, self).__init__()
self.src_size = src_size
self.tgt_size = tgt_size
self.output_size = output_size
self.num_head = num_head
self.src_lin = FCNet(src_size, output_size * 2, drop=drop, activate
='relu')
self.tgt_lin = FCNet(tgt_size, output_size, drop=drop, activate='relu')
self.tgt_output = FCNet(output_size + tgt_size, output_size, drop=
drop, activate='relu')
def forward(self, src, tgt):
"""
:param src: eeg feature [batch, regions, feature_size]
:param tgt: eye feature [batch, regions, feature_size]
:return:
"""
        _batch_size, _num_src = src.shape[0], src.shape[1]
src_tran = self.src_lin(src)
tgt_tran = self.tgt_lin(tgt)
src_key, src_val = torch.split(src_tran, src_tran.size(2) // 2, dim=2)
tgt_query = tgt_tran
src_key_set = torch.split(src_key, src_key.size(2) // self.num_head,
dim=2)
src_val_set = torch.split(src_val, src_val.size(2) // self.num_head,
dim=2)
tgt_query_set = torch.split(tgt_query, tgt_query.size(2) // self.
num_head, dim=2)
for i in range(self.num_head):
src_key_slice, tgt_query_slice, src_val_slice = src_key_set[i
], tgt_query_set[i], src_val_set[i]
src2tgt = tgt_query_slice @ src_key_slice.transpose(1, 2) / (self
.output_size // self.num_head) ** 0.5
interMAF_src2tgt = F.softmax(src2tgt, dim=2).unsqueeze(3)
tgt_update = (interMAF_src2tgt * src_val_slice.unsqueeze(1)).sum(2
) if i == 0 else torch.cat((tgt_update, (interMAF_src2tgt *
src_val_slice.unsqueeze(1)).sum(2)), dim=2)
cat_tgt = torch.cat((tgt, tgt_update), dim=2)
tgt_updated = self.tgt_output(cat_tgt)
return tgt_updated
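# Hedged usage sketch (assumed shapes, not from the original repo): one-side
# inter-modality attention maps src (e.g. EEG) features onto tgt (e.g. eye)
# features; num_head must divide output_size.
def _example_one_side_update():
    block = OneSideInterModalityUpdate(src_size=4, tgt_size=4,
        output_size=4, num_head=2)
    src = torch.rand(4, 4, 4)  # [batch, src regions, feature]
    tgt = torch.rand(4, 4, 4)  # [batch, tgt regions, feature]
    return block(src, tgt)  # -> [4, 4, 4] updated tgt features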
class DyIntraModalityUpdate(nn.Module):
"""
Dynamic Intra-Modality Attention Flow
"""
def __init__(self, v_size, q_size, output_size, num_head, drop=0.0):
super(DyIntraModalityUpdate, self).__init__()
self.v_size = v_size
self.q_size = q_size
self.output_size = output_size
self.num_head = num_head
self.v4q_gate_lin = FCNet(v_size, output_size, drop=drop)
self.q4v_gate_lin = FCNet(q_size, output_size, drop=drop)
self.v_lin = FCNet(v_size, output_size * 3, drop=drop, activate='relu')
self.q_lin = FCNet(q_size, output_size * 3, drop=drop, activate='relu')
self.v_output = FCNet(output_size, output_size, drop=drop, activate
='relu')
self.q_output = FCNet(output_size, output_size, drop=drop, activate
='relu')
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.sigmoid = nn.Sigmoid()
def forward(self, v, q):
"""
:param v: [batch_size, num_obj, feature_size]
:param q: [batch_size, max_len, feature_size]
:return:
"""
_batch_size, num_obj = v.shape[0], v.shape[1]
max_len = q.shape[1]
v_mean = v.sum(1) / num_obj
q_mean = q.sum(1) / max_len
v4q_gate = self.sigmoid(self.v4q_gate_lin(v_mean)).unsqueeze(1)
q4v_gate = self.sigmoid(self.q4v_gate_lin(q_mean)).unsqueeze(1)
v_tran = self.v_lin(v)
q_tran = self.q_lin(q)
v_key, v_query, v_val = torch.split(v_tran, v_tran.size(2) // 3, dim=2)
q_key, q_query, q_val = torch.split(q_tran, q_tran.size(2) // 3, dim=2)
gated_v_query = (1 + q4v_gate) * v_query
gated_v_key = (1 + q4v_gate) * v_key
gated_v_val = (1 + q4v_gate) * v_val
gated_q_query = (1 + v4q_gate) * q_query
gated_q_key = (1 + v4q_gate) * q_key
gated_q_val = (1 + v4q_gate) * q_val
v_key_set = torch.split(gated_v_key, gated_v_key.size(2) // self.
num_head, dim=2)
v_query_set = torch.split(gated_v_query, gated_v_query.size(2) //
self.num_head, dim=2)
v_val_set = torch.split(gated_v_val, gated_v_val.size(2) // self.
num_head, dim=2)
q_key_set = torch.split(gated_q_key, gated_q_key.size(2) // self.
num_head, dim=2)
q_query_set = torch.split(gated_q_query, gated_q_query.size(2) //
self.num_head, dim=2)
q_val_set = torch.split(gated_q_val, gated_q_val.size(2) // self.
num_head, dim=2)
for i in range(self.num_head):
v_key_slice, v_query_slice, v_val_slice = v_key_set[i
], v_query_set[i], v_val_set[i]
q_key_slice, q_query_slice, q_val_slice = q_key_set[i
], q_query_set[i], q_val_set[i]
v2v = v_query_slice @ v_key_slice.transpose(1, 2) / (self.
output_size // self.num_head) ** 0.5
q2q = q_query_slice @ q_key_slice.transpose(1, 2) / (self.
output_size // self.num_head) ** 0.5
dyIntranMAF_v2v = F.softmax(v2v, dim=2).unsqueeze(3)
dyIntranMAF_q2q = F.softmax(q2q, dim=2).unsqueeze(3)
v_update = (dyIntranMAF_v2v * v_val_slice.unsqueeze(1)).sum(2
) if i == 0 else torch.cat((v_update, (dyIntranMAF_v2v *
v_val_slice.unsqueeze(1)).sum(2)), dim=2)
q_update = (dyIntranMAF_q2q * q_val_slice.unsqueeze(1)).sum(2
) if i == 0 else torch.cat((q_update, (dyIntranMAF_q2q *
q_val_slice.unsqueeze(1)).sum(2)), dim=2)
updated_v = self.v_output(v + v_update)
updated_q = self.q_output(q + q_update)
return updated_v, updated_q
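# Hedged usage sketch (assumed sizes): each modality's mean feature gates the
# other modality's keys/queries/values before intra-modality self-attention.
def _example_dy_intra_update():
    block = DyIntraModalityUpdate(v_size=4, q_size=4, output_size=4,
        num_head=2)
    v, q = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
    return block(v, q)  # -> two tensors of shape [4, 4, 4]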
class SingleBlockNew(nn.Module):
"""
    A single inter- and intra-modality block applied multiple times; in this
    configuration, all the basic blocks share the same parameters.
"""
def __init__(self, num_blocks, v_size, q_size, output_size,
num_inter_head, num_intra_head, drop=0.0):
super(SingleBlockNew, self).__init__()
self.v_size = v_size
self.q_size = q_size
self.output_size = output_size
self.num_inter_head = num_inter_head
self.num_intra_head = num_intra_head
self.num_block = num_blocks
self.v_lin = FCNet(v_size, output_size, drop=drop, activate='relu')
self.q_lin = FCNet(q_size, output_size, drop=drop, activate='relu')
self.v2q_interBlock = OneSideInterModalityUpdate(output_size,
output_size, output_size, num_inter_head, drop)
self.q2v_interBlock = OneSideInterModalityUpdate(output_size,
output_size, output_size, num_inter_head, drop)
self.intraBlock = DyIntraModalityUpdate(output_size, output_size,
output_size, num_intra_head, drop)
def forward(self, input_0, input_1):
primals_1 = self.v_lin.lin.weight
primals_2 = self.v_lin.lin.bias
primals_4 = self.q_lin.lin.weight
primals_5 = self.q_lin.lin.bias
primals_7 = self.v2q_interBlock.src_lin.lin.weight
primals_8 = self.v2q_interBlock.src_lin.lin.bias
primals_9 = self.v2q_interBlock.tgt_lin.lin.weight
primals_10 = self.v2q_interBlock.tgt_lin.lin.bias
primals_11 = self.v2q_interBlock.tgt_output.lin.weight
primals_12 = self.v2q_interBlock.tgt_output.lin.bias
primals_13 = self.q2v_interBlock.src_lin.lin.weight
primals_14 = self.q2v_interBlock.src_lin.lin.bias
primals_15 = self.q2v_interBlock.tgt_lin.lin.weight
primals_16 = self.q2v_interBlock.tgt_lin.lin.bias
primals_17 = self.q2v_interBlock.tgt_output.lin.weight
primals_18 = self.q2v_interBlock.tgt_output.lin.bias
primals_19 = self.intraBlock.v4q_gate_lin.lin.weight
primals_20 = self.intraBlock.v4q_gate_lin.lin.bias
primals_21 = self.intraBlock.q4v_gate_lin.lin.weight
primals_22 = self.intraBlock.q4v_gate_lin.lin.bias
primals_23 = self.intraBlock.v_lin.lin.weight
primals_24 = self.intraBlock.v_lin.lin.bias
primals_25 = self.intraBlock.q_lin.lin.weight
primals_26 = self.intraBlock.q_lin.lin.bias
primals_27 = self.intraBlock.v_output.lin.weight
primals_28 = self.intraBlock.v_output.lin.bias
primals_29 = self.intraBlock.q_output.lin.weight
primals_30 = self.intraBlock.q_output.lin.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30])
return output[0], output[1]
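# Note the wrapper pattern used throughout these generated modules: `forward`
# flattens the module's parameters into `primals_*`, invokes the fused
# inductor graph once via `call(...)`, and returns only the user-visible
# outputs.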
| Ruiver/CTCNet | SingleBlock | false | 17,954 | ["Apache-2.0"] | 6 | 539e55ec9fed06028379d35dfd5cd4074755ffd8 | https://github.com/Ruiver/CTCNet/tree/539e55ec9fed06028379d35dfd5cd4074755ffd8 |
Subtract | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/5k/c5kd57cxdkzbcylghgp3jbfchensnm3y33mnc6b7n3yo2g7iotgp.py
# Topologically Sorted Source Nodes: [sub], Original ATen: [aten.sub]
# Source node to ATen node mapping:
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub], Original ATen: [aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
class Subtract(torch.nn.Module):
""" Subtract module for a functional subtract"""
def forward(self, x, y):
"""
        Forward-pass routine for the subtract op
"""
return x - y
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class SubtractNew(torch.nn.Module):
""" Subtract module for a functional subtract"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
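# Hedged equivalence sketch (illustrative only, requires CUDA): the compiled
# call should reproduce eager subtraction for inputs of the asserted shape.
def _example_subtract_equivalence():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    return torch.allclose(SubtractNew()(a, b), a - b)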
| Rohan-Chaudhury/aimet | Subtract | false | 17,955 | ["BSD-3-Clause"] | 3 | 1c38cac8cc0fd32dca40ce5e39940805d29f7a4a | https://github.com/Rohan-Chaudhury/aimet/tree/1c38cac8cc0fd32dca40ce5e39940805d29f7a4a |
SpectralConvergence | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/lc/clc4cnw5arjjr2rp5lrx7iohtgr7s4az6ejblprqdzldbiixh35g.py
# Topologically Sorted Source Nodes: [sub, norm, norm_1], Original ATen: [aten.sub, aten.linalg_vector_norm]
# Source node to ATen node mapping:
# norm => pow_1, sum_1
# norm_1 => pow_3, sum_2
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2]), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1, 2]), kwargs = {})
triton_per_fused_linalg_vector_norm_sub_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = (xindex // 4)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tmp0 * tmp0
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
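    # Per-slice reduction: tmp7 holds sum((in0 - in1)^2) and tmp12 holds
    # sum(in0^2); the second kernel applies sqrt to both and averages the ratio.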
tl.store(out_ptr0 + (x3), tmp7, xmask)
tl.store(out_ptr1 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/cd/ccd3afwoiorphmygkluiq5edbi5uztnqh5p4w4ox4l7pyg5etl4n.py
# Topologically Sorted Source Nodes: [norm, norm_1, truediv, mean], Original ATen: [aten.linalg_vector_norm, aten.div, aten.mean]
# Source node to ATen node mapping:
# mean => mean
# norm => pow_2
# norm_1 => pow_4
# truediv => div
# Graph fragment:
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, %pow_4), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
triton_per_fused_div_linalg_vector_norm_mean_1 = async_compile.triton('triton_per_fused_div_linalg_vector_norm_mean_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_linalg_vector_norm_mean_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = libdevice.sqrt(tmp0)
tmp3 = libdevice.sqrt(tmp2)
tmp4 = tmp1 / tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp8 = 16.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, norm, norm_1], Original ATen: [aten.sub, aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_sub_0.run(arg0_1, arg1_1, buf0, buf1, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [norm, norm_1, truediv, mean], Original ATen: [aten.linalg_vector_norm, aten.div, aten.mean]
triton_per_fused_div_linalg_vector_norm_mean_1.run(buf3, buf0, buf1, 1, 16, grid=grid(1), stream=stream0)
del buf0
del buf1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class SpectralConvergence(nn.Module):
def __init__(self):
"""Initilize spectral convergence loss module."""
super().__init__()
def forward(self, predicts_mag, targets_mag):
"""Calculate norm of difference operator.
Args:
predicts_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
targets_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
Returns:
Tensor: Spectral convergence loss value.
"""
return torch.mean(torch.norm(targets_mag - predicts_mag, dim=(1, 2),
p='fro') / torch.norm(targets_mag, dim=(1, 2), p='fro'))
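# The loss above is the spectral convergence measure
#   SC = ||S_target - S_pred||_F / ||S_target||_F
# with Frobenius norms taken over frames x freq bins, averaged over the
# batch; 0 means a perfect match.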
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_sub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tmp0 * tmp0
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + x3, tmp7, xmask)
tl.store(out_ptr1 + x3, tmp12, xmask)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_mean_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = libdevice.sqrt(tmp0)
tmp3 = libdevice.sqrt(tmp2)
tmp4 = tmp1 / tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp8 = 16.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_sub_0[grid(16)](arg0_1, arg1_1,
buf0, buf1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_div_linalg_vector_norm_mean_1[grid(1)](buf3, buf0,
buf1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class SpectralConvergenceNew(nn.Module):
def __init__(self):
"""Initilize spectral convergence loss module."""
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| SolomidHero/speech-regeneration-enhancer | SpectralConvergence | false | 17,956 | ["MIT"] | 8 | eb43907ff085d68a707ff7bc3af14e93ff66fd65 | https://github.com/SolomidHero/speech-regeneration-enhancer/tree/eb43907ff085d68a707ff7bc3af14e93ff66fd65 |
GumbelSoftmaxLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/su/csubpzatspfqrmregpjrvpx2ttwsh5jt74dgggbuzg33aj7rykhj.py
# Topologically Sorted Source Nodes: [indexes], Original ATen: [aten.argmax]
# Source node to ATen node mapping:
# indexes => argmax
# Graph fragment:
# %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%arg0_1, -1), kwargs = {})
triton_poi_fused_argmax_0 = async_compile.triton('triton_poi_fused_argmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_argmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
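    # Pairwise argmax tournament over the 4 classes in the last dim: each
    # compare/select stage carries the running max value and its index, and
    # the (x != x) checks reproduce torch.argmax's NaN ordering.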
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + (x0), tmp46, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ta/ctadkjwokexzjpke5yr2vtq6fgj5h3yogh3lexree3cbxm6sjxwb.py
# Topologically Sorted Source Nodes: [scatter_], Original ATen: [aten.scatter]
# Source node to ATen node mapping:
# scatter_ => scatter_upon_const_tensor
# Graph fragment:
# %scatter_upon_const_tensor : [num_users=1] = call_function[target=torch._inductor.fx_passes.post_grad.scatter_upon_const_tensor](args = (), kwargs = {shape: [64, 4], background_val: 0.0, dtype: torch.float32, dim: 1, selector: %view_1, val: 1})
# %view_6 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_5, [4, 4, 4, 4]), kwargs = {})
triton_poi_fused_scatter_1 = async_compile.triton('triton_poi_fused_scatter_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_scatter_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_scatter_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
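    # One-hot scatter: emit 1.0 where this element's class index (x0) equals
    # the argmax index loaded for its row, and 0.0 elsewhere.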
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(in_out_ptr0 + (x4), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [indexes], Original ATen: [aten.argmax]
stream0 = get_raw_stream(0)
triton_poi_fused_argmax_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [scatter_], Original ATen: [aten.scatter]
triton_poi_fused_scatter_1.run(buf2, buf0, 256, grid=grid(256), stream=stream0)
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.distributions import RelaxedOneHotCategorical
import torch.nn.parallel
import torch.utils.data
import torch.distributions
def gumbel_softmax_sample(logits: 'torch.Tensor', temperature: 'float'=1.0,
training: 'bool'=True, straight_through: 'bool'=False):
size = logits.size()
if not training:
indexes = logits.argmax(dim=-1)
one_hot = torch.zeros_like(logits).view(-1, size[-1])
one_hot.scatter_(1, indexes.view(-1, 1), 1)
one_hot = one_hot.view(*size)
return one_hot
sample = RelaxedOneHotCategorical(logits=logits, temperature=temperature
).rsample()
if straight_through:
size = sample.size()
indexes = sample.argmax(dim=-1)
hard_sample = torch.zeros_like(sample).view(-1, size[-1])
hard_sample.scatter_(1, indexes.view(-1, 1), 1)
hard_sample = hard_sample.view(*size)
sample = sample + (hard_sample - sample).detach()
return sample
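# Note: the straight-through branch returns the hard one-hot sample in the
# forward pass, while `(hard_sample - sample).detach()` routes gradients
# through the relaxed (soft) sample in the backward pass.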
class GumbelSoftmaxLayer(nn.Module):
def __init__(self, temperature: 'float'=1.0, trainable_temperature:
'bool'=False, straight_through: 'bool'=False):
super(GumbelSoftmaxLayer, self).__init__()
self.straight_through = straight_through
if not trainable_temperature:
self.temperature = temperature
else:
self.temperature = torch.nn.Parameter(torch.tensor([temperature
]), requires_grad=True)
def forward(self, logits: 'torch.Tensor'):
return gumbel_softmax_sample(logits, self.temperature, self.
training, self.straight_through)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.distributions import RelaxedOneHotCategorical
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x0, tmp46, xmask)
@triton.jit
def triton_poi_fused_scatter_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(in_out_ptr0 + x4, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_argmax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_scatter_1[grid(256)](buf2, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
return buf2,
def gumbel_softmax_sample(logits: 'torch.Tensor', temperature: 'float'=1.0,
training: 'bool'=True, straight_through: 'bool'=False):
size = logits.size()
if not training:
indexes = logits.argmax(dim=-1)
one_hot = torch.zeros_like(logits).view(-1, size[-1])
one_hot.scatter_(1, indexes.view(-1, 1), 1)
one_hot = one_hot.view(*size)
return one_hot
sample = RelaxedOneHotCategorical(logits=logits, temperature=temperature
).rsample()
if straight_through:
size = sample.size()
indexes = sample.argmax(dim=-1)
hard_sample = torch.zeros_like(sample).view(-1, size[-1])
hard_sample.scatter_(1, indexes.view(-1, 1), 1)
hard_sample = hard_sample.view(*size)
sample = sample + (hard_sample - sample).detach()
return sample
class GumbelSoftmaxLayerNew(nn.Module):
def __init__(self, temperature: 'float'=1.0, trainable_temperature:
'bool'=False, straight_through: 'bool'=False):
super(GumbelSoftmaxLayerNew, self).__init__()
self.straight_through = straight_through
if not trainable_temperature:
self.temperature = temperature
else:
self.temperature = torch.nn.Parameter(torch.tensor([temperature
]), requires_grad=True)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Slowika/GameBias-EmeCom2020 | GumbelSoftmaxLayer | false | 17,957 | ["MIT"] | 5 | 5b94c47559f8202bca99c26fc1bcb078dd0509a6 | https://github.com/Slowika/GameBias-EmeCom2020/tree/5b94c47559f8202bca99c26fc1bcb078dd0509a6 |
Hsigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/qp/cqpk37t4yaux7dlqnx73vgzupb32hkegnu5r54yzdu5dmgxzgdet.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.hardsigmoid]
# Source node to ATen node mapping:
# x => add, clamp_max, clamp_min, div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6), kwargs = {})
triton_poi_fused_hardsigmoid_0 = async_compile.triton('triton_poi_fused_hardsigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_hardsigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_hardsigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
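    # hardsigmoid(x) = clamp(x + 3, 0, 6) / 6; the division is folded into the
    # multiply by 1/6 below.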
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.hardsigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_hardsigmoid_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.quantization import QuantStub
from torch.quantization import DeQuantStub
class Hsigmoid(nn.Module):
def __init__(self, add_stub=False):
super().__init__()
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.add_stub = add_stub
self.hsigmoid = nn.Hardsigmoid()
def forward(self, x):
if self.add_stub:
x = self.quant(x)
x = self.hsigmoid(x)
if self.add_stub:
x = self.dequant(x)
return x
def fuse_model(self):
pass
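# nn.Hardsigmoid computes clamp(x + 3, 0, 6) / 6, matching the constants in
# the Triton kernel above; the QuantStub/DeQuantStub pair only takes effect
# when add_stub=True.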
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.quantization import QuantStub
from torch.quantization import DeQuantStub
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_hardsigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_hardsigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HsigmoidNew(nn.Module):
def __init__(self, add_stub=False):
super().__init__()
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.add_stub = add_stub
self.hsigmoid = nn.Hardsigmoid()
def fuse_model(self):
pass
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| T-head-Semi/tvm | Hsigmoid | false | 17,958 | ["Apache-2.0"] | 4 | c1b8e06685c92fb7cacbe989e147b0622aee4503 | https://github.com/T-head-Semi/tvm/tree/c1b8e06685c92fb7cacbe989e147b0622aee4503 |
_TestNetStrided | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/2q/c2qrlz7di27hxszmfa6hc6ofhqc3yvnegk3t333cjhshhfiipiz2.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 288000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3600) % 20
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vl/cvlfdgitmfo64xyfcaem22ehr6qdauwehudbmhpzj3qnrwewhnea.py
# Topologically Sorted Source Nodes: [max_pool2d, x], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
# Source node to ATen node mapping:
# max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1
# x => relu
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 72000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x3 = (xindex // 30)
x2 = (xindex // 18000)
x4 = xindex % 18000
x5 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x3)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x4 + (18048*x2)), tmp15, xmask)
tl.store(out_ptr1 + (x5), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/qj/cqjasfx46lzi4c76hyghgr2r4hoy5ksgo6vnlkskgxd5ve5xl36e.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 33800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 169) % 50
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/g5/cg56oqcyrepgqykygoiskfwdyh3uylmupqdcciskex4vmiek5ttt.py
# Topologically Sorted Source Nodes: [max_pool2d_1, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# x_1 => relu_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 7200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x5 = (xindex // 36)
x3 = (xindex // 1800)
x4 = xindex % 1800
tmp0 = tl.load(in_ptr0 + ((2*x0) + (26*x1) + (169*x5)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (26*x1) + (169*x5)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (13 + (2*x0) + (26*x1) + (169*x5)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (14 + (2*x0) + (26*x1) + (169*x5)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + (x4 + (1920*x3)), tmp15, xmask)
tl.store(out_ptr1 + (x4 + (1824*x3)), tmp18, xmask)
tl.store(out_ptr2 + (x4 + (1920*x3)), tmp20, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vz/cvzrcf4u7xr5owe2vpmb7tolqhoamxogseaio5n654a6xmc5mbd2.py
# Topologically Sorted Source Nodes: [max_pool2d_1, x_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.view]
# Source node to ATen node mapping:
# max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1
# x_1 => relu_1
# x_2 => view
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {})
# %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [-1, 200]), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_view_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_view_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_view_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_view_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 7200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((1824*(x0 // 1800)) + (x0 % 1800)), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/t7/ct7bq6egumhz7bf4fhsb3mduyemx3xh5jwc2jykv52pi7iqw3g5d.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 500
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/y5/cy53zwe2ku2ihwlwbyiyh2mqk3jry37ri2hx7ky5exmzx2imeuha.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_6 = async_compile.triton('triton_per_fused__log_softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 36
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (20, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (50, 20, 5, 5), (500, 25, 5, 1))
assert_size_stride(primals_5, (50, ), (1, ))
assert_size_stride(primals_6, (500, 200), (200, 1))
assert_size_stride(primals_7, (500, ), (1, ))
assert_size_stride(primals_8, (10, 500), (500, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 288000, grid=grid(288000), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 20, 30, 30), (18048, 900, 30, 1), torch.int8)
buf3 = empty_strided_cuda((4, 20, 30, 30), (18000, 900, 30, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d, x], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
triton_poi_fused_max_pool2d_with_indices_relu_1.run(buf1, buf2, buf3, 72000, grid=grid(72000), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 50, 13, 13), (8450, 169, 13, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_5, 33800, grid=grid(33800), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 50, 6, 6), (1920, 36, 6, 1), torch.int8)
buf7 = empty_strided_cuda((4, 50, 6, 6), (1824, 36, 6, 1), torch.float32)
buf15 = empty_strided_cuda((4, 50, 6, 6), (1920, 36, 6, 1), torch.bool)
# Topologically Sorted Source Nodes: [max_pool2d_1, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward]
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3.run(buf5, buf6, buf7, buf15, 7200, grid=grid(7200), stream=stream0)
buf8 = empty_strided_cuda((36, 200), (200, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d_1, x_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.view]
triton_poi_fused_max_pool2d_with_indices_relu_view_4.run(buf7, buf8, 7200, grid=grid(7200), stream=stream0)
del buf7
buf9 = empty_strided_cuda((36, 500), (500, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf8, reinterpret_tensor(primals_6, (200, 500), (1, 200), 0), out=buf9)
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf10, primals_7, 18000, grid=grid(18000), stream=stream0)
del primals_7
buf11 = empty_strided_cuda((36, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf10, reinterpret_tensor(primals_8, (500, 10), (1, 500), 0), alpha=1, beta=1, out=buf11)
del primals_9
buf14 = empty_strided_cuda((36, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_6.run(buf11, buf14, 36, 10, grid=grid(36), stream=stream0)
del buf11
return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, buf8, buf10, buf14, primals_8, primals_6, buf15, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((20, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((50, 20, 5, 5), (500, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((500, 200), (200, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 500), (500, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
class _TestNetStrided(torch.nn.Module):
def __init__(self):
super(_TestNetStrided, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 20, kernel_size=5)
self.conv2 = torch.nn.Conv2d(20, 50, kernel_size=5, stride=(2, 2))
self.fc1 = torch.nn.Linear(200, 500)
self.fc2 = torch.nn.Linear(500, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
x = x.view(-1, 200)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 288000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 20
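    # x1 picks the output channel (20 channels of 60 * 60 = 3600 elements
    # each) so the matching bias value can be added in place below.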
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 72000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x3 = xindex // 30
x2 = xindex // 18000
x4 = xindex % 18000
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
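    # The four loads above cover one 2x2 pooling window; tmp15 encodes the
    # argmax position (0-3) as int8 for the backward pass, and tmp18 fuses
    # the ReLU by clamping the pooled maximum at zero.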
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x4 + 18048 * x2), tmp15, xmask)
tl.store(out_ptr1 + x5, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 33800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 169 % 50
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 7200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x5 = xindex // 36
x3 = xindex // 1800
x4 = xindex % 1800
tmp0 = tl.load(in_ptr0 + (2 * x0 + 26 * x1 + 169 * x5), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 26 * x1 + 169 * x5), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (13 + 2 * x0 + 26 * x1 + 169 * x5), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (14 + 2 * x0 + 26 * x1 + 169 * x5), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
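    # tmp20 marks positions where the ReLU output is zero; this boolean mask
    # is saved for aten.threshold_backward in the backward pass.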
tl.store(out_ptr0 + (x4 + 1920 * x3), tmp15, xmask)
tl.store(out_ptr1 + (x4 + 1824 * x3), tmp18, xmask)
tl.store(out_ptr2 + (x4 + 1920 * x3), tmp20, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_view_4(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 7200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1824 * (x0 // 1800) + x0 % 1800), xmask)
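    # Repacks the padded (stride-1824) pooling output into a contiguous
    # (36, 200) buffer so the following mm sees a plain row-major layout.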
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 18000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 500
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
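    # Fused epilogue for fc1: add the bias, then apply ReLU via max(0, x).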
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 36
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
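    # Numerically stable log-softmax over each row of 10 logits: subtract the
    # row max before exp, then subtract log(sum(exp(...))) (log-sum-exp).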
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (20,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (50, 20, 5, 5), (500, 25, 5, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (500, 200), (200, 1))
assert_size_stride(primals_7, (500,), (1,))
assert_size_stride(primals_8, (10, 500), (500, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1))
buf1 = buf0
del buf0
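        # buf1 aliases the convolution output so the bias add runs in place.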
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(288000)](buf1, primals_2,
288000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 20, 30, 30), (18048, 900, 30, 1),
torch.int8)
buf3 = empty_strided_cuda((4, 20, 30, 30), (18000, 900, 30, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(72000)](buf1,
buf2, buf3, 72000, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 50, 13, 13), (8450, 169, 13, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(33800)](buf5, primals_5, 33800,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 50, 6, 6), (1920, 36, 6, 1), torch.int8)
        buf7 = empty_strided_cuda((4, 50, 6, 6), (1824, 36, 6, 1),
            torch.float32)
buf15 = empty_strided_cuda((4, 50, 6, 6), (1920, 36, 6, 1), torch.bool)
        triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[
            grid(7200)](buf5, buf6, buf7, buf15, 7200, XBLOCK=128,
            num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((36, 200), (200, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_view_4[grid(7200)](buf7,
buf8, 7200, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((36, 500), (500, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_6, (200, 500), (
1, 200), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_relu_5[grid(18000)](buf10, primals_7, 18000,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf11 = empty_strided_cuda((36, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf10, reinterpret_tensor(primals_8,
(500, 10), (1, 500), 0), alpha=1, beta=1, out=buf11)
del primals_9
buf14 = empty_strided_cuda((36, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_6[grid(36)](buf11, buf14, 36, 10,
XBLOCK=32, num_warps=4, num_stages=1)
del buf11
return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, buf8, buf10, buf14, primals_8, primals_6, buf15)
class _TestNetStridedNew(torch.nn.Module):
def __init__(self):
super(_TestNetStridedNew, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 20, kernel_size=5)
self.conv2 = torch.nn.Conv2d(20, 50, kernel_size=5, stride=(2, 2))
self.fc1 = torch.nn.Linear(200, 500)
self.fc2 = torch.nn.Linear(500, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| Rohan-Chaudhury/aimet | _TestNetStrided | false | 17,959 | ["BSD-3-Clause"] | 3 | 1c38cac8cc0fd32dca40ce5e39940805d29f7a4a | https://github.com/Rohan-Chaudhury/aimet/tree/1c38cac8cc0fd32dca40ce5e39940805d29f7a4a |
Divide | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/p2/cp2gakfdo76h7atpdbe44fruke2k6zlfdr5utyfkdfcqp6vanj6c.py
# Topologically Sorted Source Nodes: [div], Original ATen: [aten.div]
# Source node to ATen node mapping:
# div => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [div], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
class Divide(torch.nn.Module):
""" Divide module for a functional divide"""
def forward(self, x, y):
"""
Forward-pass routine for divide op
"""
return torch.div(x, y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
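    # Elementwise quotient; the operand order (in_ptr0 / in_ptr1) follows the
    # traced graph's div(%arg1_1, %arg0_1) noted in the kernel header above.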
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused_div_0[grid(256)](arg1_1, arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class DivideNew(torch.nn.Module):
""" Divide module for a functional divide"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Rohan-Chaudhury/aimet | Divide | false | 17,960 | ["BSD-3-Clause"] | 3 | 1c38cac8cc0fd32dca40ce5e39940805d29f7a4a | https://github.com/Rohan-Chaudhury/aimet/tree/1c38cac8cc0fd32dca40ce5e39940805d29f7a4a |
Hswish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/q7/cq7mvrstxox2hxsie4qvbguyx5l7ae7el3f7lbjb5eenavbss23r.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.hardswish]
# Source node to ATen node mapping:
# x => add, clamp_max, clamp_min, div, mul
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %clamp_max), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 6), kwargs = {})
triton_poi_fused_hardswish_0 = async_compile.triton('triton_poi_fused_hardswish_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_hardswish_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_hardswish_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.hardswish]
stream0 = get_raw_stream(0)
triton_poi_fused_hardswish_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.quantization import QuantStub
from torch.quantization import DeQuantStub
class Hswish(nn.Module):
def __init__(self, add_stub=False):
super().__init__()
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.add_stub = add_stub
self.hswish = nn.Hardswish()
def forward(self, x):
if self.add_stub:
x = self.quant(x)
x = self.hswish(x)
if self.add_stub:
x = self.dequant(x)
return x
def fuse_model(self):
pass
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.quantization import QuantStub
from torch.quantization import DeQuantStub
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_hardswish_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
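    # Hardswish computed inline: x * clamp(x + 3, 0, 6) * (1/6).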
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused_hardswish_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HswishNew(nn.Module):
def __init__(self, add_stub=False):
super().__init__()
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.add_stub = add_stub
self.hswish = nn.Hardswish()
def fuse_model(self):
pass
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| T-head-Semi/tvm | Hswish | false | 17,961 | ["Apache-2.0"] | 4 | c1b8e06685c92fb7cacbe989e147b0622aee4503 | https://github.com/T-head-Semi/tvm/tree/c1b8e06685c92fb7cacbe989e147b0622aee4503 |
VirtualBatchNormNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/5u/c5uyterbdkjjyldewxrpavht3nabuvkgn6gschzu6m4silcdd2s3.py
# Topologically Sorted Source Nodes: [mean, pow_1, mean_sq, mul, mul_1, mean_2, mul_2, mul_3, mean_sq_1, add_2, pow_2, sub, std, x, x_1, x_2, x_3], Original ATen: [aten.mean, aten.pow, aten.mul, aten.add, aten.sub, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_2 => add_2
# mean => mean
# mean_2 => add
# mean_sq => mean_1
# mean_sq_1 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_1 => pow_1
# pow_2 => pow_2
# std => sqrt
# sub => sub
# x => sub_1
# x_1 => div
# x_2 => mul_4
# x_3 => add_3
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [0], True), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0], True), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.8), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 0.8), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1e-05), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %pow_2), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sub,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %add), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_4), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_5), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sqrt_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
x3 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (x2), xmask)
tmp24 = tl.load(in_ptr2 + (x2), xmask)
tmp27 = tl.load(in_ptr0 + (x2), xmask)
tmp35 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr4 + (x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = 0.2
tmp10 = tmp8 * tmp9
tmp12 = 0.8
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tmp0 * tmp0
tmp16 = tmp1 * tmp1
tmp17 = tmp15 + tmp16
tmp18 = tmp3 * tmp3
tmp19 = tmp17 + tmp18
tmp20 = tmp5 * tmp5
tmp21 = tmp19 + tmp20
tmp22 = tmp21 / tmp7
tmp23 = tmp22 * tmp9
tmp25 = tmp24 * tmp12
tmp26 = tmp23 + tmp25
tmp28 = tmp27 - tmp14
tmp29 = 1e-05
tmp30 = tmp26 + tmp29
tmp31 = tmp14 * tmp14
tmp32 = tmp30 - tmp31
tmp33 = libdevice.sqrt(tmp32)
tmp34 = tmp28 / tmp33
tmp36 = tmp34 * tmp35
tmp38 = tmp36 + tmp37
tl.store(out_ptr0 + (x2), tmp14, xmask)
tl.store(out_ptr1 + (x2), tmp26, xmask)
tl.store(out_ptr2 + (x2), tmp34, xmask)
tl.store(out_ptr3 + (x2), tmp38, xmask)
''', device_str='cuda')
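# Note on the constants above: 0.2 and 0.8 are the blending coefficients
# new_coeff = 1.0 / (batch_size + 1.0) and old_coeff = 1.0 - new_coeff
# specialized for batch_size = 4, so this single pointwise kernel fuses the
# batch mean/mean_sq reductions, the blend with the reference statistics,
# and the affine normalization (x - mean) / sqrt(mean_sq + eps - mean**2)
# * gamma + beta.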
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, pow_1, mean_sq, mul, mul_1, mean_2, mul_2, mul_3, mean_sq_1, add_2, pow_2, sub, std, x, x_1, x_2, x_3], Original ATen: [aten.mean, aten.pow, aten.mul, aten.add, aten.sub, aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_pow_sqrt_sub_0.run(primals_1, primals_2, primals_3, primals_4, primals_5, buf0, buf1, buf2, buf3, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
del primals_3
del primals_4
del primals_5
return (buf3, buf0, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import torch.utils
import torch.utils.data
from torch.nn.parameter import Parameter
from torch.nn.modules import Module
class VirtualBatchNormNN(Module):
"""
Module for Virtual Batch Normalization.
Implementation borrowed and modified from Rafael_Valle's code + help of SimonW from this discussion thread:
https://discuss.pytorch.org/t/parameter-grad-of-conv-weight-is-none-after-virtual-batch-normalization/9036
"""
def __init__(self, num_features: 'int', eps: 'float'=1e-05):
super().__init__()
self.num_features = num_features
self.eps = eps
self.ref_mean = self.register_parameter('ref_mean', None)
self.ref_mean_sq = self.register_parameter('ref_mean_sq', None)
gamma = torch.normal(mean=torch.ones(1, num_features), std=0.02)
self.gamma = Parameter(gamma.float())
self.beta = Parameter(torch.FloatTensor(1, num_features).fill_(0))
def get_stats(self, x):
"""
Calculates mean and mean square for given batch x.
Args:
x: tensor containing batch of activations
Returns:
mean: mean tensor over features
mean_sq: squared mean tensor over features
"""
mean = x.mean(0, keepdim=True)
mean_sq = (x ** 2).mean(0, keepdim=True)
return mean, mean_sq
    def forward(self, x, ref_mean: 'None', ref_mean_sq: 'None'):
        """
        Forward pass of virtual batch normalization.
        Virtual batch normalization requires two forward passes:
        one for the reference batch and one for the train batch.
        Passing ref_mean and ref_mean_sq as None indicates a forward pass
        for the reference batch; otherwise the reference statistics are
        blended with the statistics of the current batch.
        Args:
            x: input tensor
            ref_mean: reference batch mean, or None for the reference pass
            ref_mean_sq: reference batch squared mean, or None for the
                reference pass
        Result:
            x: normalized batch tensor
        """
mean, mean_sq = self.get_stats(x)
if ref_mean is None or ref_mean_sq is None:
mean = mean.clone().detach()
mean_sq = mean_sq.clone().detach()
out = self._normalize(x, mean, mean_sq)
else:
batch_size = x.size(0)
new_coeff = 1.0 / (batch_size + 1.0)
old_coeff = 1.0 - new_coeff
mean = new_coeff * mean + old_coeff * ref_mean
mean_sq = new_coeff * mean_sq + old_coeff * ref_mean_sq
out = self._normalize(x, mean, mean_sq)
return out, mean, mean_sq
def _normalize(self, x, mean, mean_sq):
"""
Normalize tensor x given the statistics.
Args:
x: input tensor
            mean: mean over features; it has size [1, num_features, ...]
            mean_sq: squared means over features, same size as mean.
Result:
x: normalized batch tensor
"""
assert mean_sq is not None
assert mean is not None
if mean.size(1) != self.num_features:
raise Exception(
                'Mean size not equal to number of features: given {}, expected {}'
.format(mean.size(1), self.num_features))
if mean_sq.size(1) != self.num_features:
raise Exception(
                'Squared mean tensor size not equal to number of features: given {}, expected {}'
.format(mean_sq.size(1), self.num_features))
std = torch.sqrt(self.eps + mean_sq - mean ** 2)
x = x - mean
x = x / std
x = x * self.gamma
x = x + self.beta
return x
def __repr__(self):
        return '{name}(num_features={num_features}, eps={eps})'.format(name=
self.__class__.__name__, **self.__dict__)
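def _vbn_usage_sketch():
    # Hypothetical usage sketch (not part of the original module): the first
    # pass with ref_mean=None computes and detaches reference statistics;
    # subsequent passes blend them with the current batch statistics.
    vbn = VirtualBatchNormNN(num_features=4)
    _, ref_mean, ref_mean_sq = vbn(torch.rand(4, 4, 4, 4), None, None)
    out, _, _ = vbn(torch.rand(4, 4, 4, 4), ref_mean, ref_mean_sq)
    return out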
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import torch.utils
import torch.utils.data
from torch.nn.parameter import Parameter
from torch.nn.modules import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_sub_0(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
x3 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + x2, xmask)
tmp24 = tl.load(in_ptr2 + x2, xmask)
tmp27 = tl.load(in_ptr0 + x2, xmask)
tmp35 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr4 + x3, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = 0.2
tmp10 = tmp8 * tmp9
tmp12 = 0.8
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tmp0 * tmp0
tmp16 = tmp1 * tmp1
tmp17 = tmp15 + tmp16
tmp18 = tmp3 * tmp3
tmp19 = tmp17 + tmp18
tmp20 = tmp5 * tmp5
tmp21 = tmp19 + tmp20
tmp22 = tmp21 / tmp7
tmp23 = tmp22 * tmp9
tmp25 = tmp24 * tmp12
tmp26 = tmp23 + tmp25
tmp28 = tmp27 - tmp14
tmp29 = 1e-05
tmp30 = tmp26 + tmp29
tmp31 = tmp14 * tmp14
tmp32 = tmp30 - tmp31
tmp33 = libdevice.sqrt(tmp32)
tmp34 = tmp28 / tmp33
tmp36 = tmp34 * tmp35
tmp38 = tmp36 + tmp37
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp26, xmask)
tl.store(out_ptr2 + x2, tmp34, xmask)
tl.store(out_ptr3 + x2, tmp38, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_pow_sqrt_sub_0[grid(256)](primals_1,
primals_2, primals_3, primals_4, primals_5, buf0, buf1, buf2,
buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
del primals_3
del primals_4
del primals_5
return buf3, buf0, buf1, buf2
class VirtualBatchNormNNNew(Module):
"""
Module for Virtual Batch Normalization.
Implementation borrowed and modified from Rafael_Valle's code + help of SimonW from this discussion thread:
https://discuss.pytorch.org/t/parameter-grad-of-conv-weight-is-none-after-virtual-batch-normalization/9036
"""
def __init__(self, num_features: 'int', eps: 'float'=1e-05):
super().__init__()
self.num_features = num_features
self.eps = eps
self.ref_mean = self.register_parameter('ref_mean', None)
self.ref_mean_sq = self.register_parameter('ref_mean_sq', None)
gamma = torch.normal(mean=torch.ones(1, num_features), std=0.02)
self.gamma = Parameter(gamma.float())
self.beta = Parameter(torch.FloatTensor(1, num_features).fill_(0))
def get_stats(self, x):
"""
Calculates mean and mean square for given batch x.
Args:
x: tensor containing batch of activations
Returns:
mean: mean tensor over features
mean_sq: squared mean tensor over features
"""
mean = x.mean(0, keepdim=True)
mean_sq = (x ** 2).mean(0, keepdim=True)
return mean, mean_sq
def _normalize(self, x, mean, mean_sq):
"""
Normalize tensor x given the statistics.
Args:
x: input tensor
            mean: mean over features; it has size [1, num_features, ...]
            mean_sq: squared means over features, same size as mean.
Result:
x: normalized batch tensor
"""
assert mean_sq is not None
assert mean is not None
if mean.size(1) != self.num_features:
raise Exception(
                'Mean size not equal to number of features: given {}, expected {}'
.format(mean.size(1), self.num_features))
if mean_sq.size(1) != self.num_features:
raise Exception(
                'Squared mean tensor size not equal to number of features: given {}, expected {}'
.format(mean_sq.size(1), self.num_features))
std = torch.sqrt(self.eps + mean_sq - mean ** 2)
x = x - mean
x = x / std
x = x * self.gamma
x = x + self.beta
return x
def __repr__(self):
        return '{name}(num_features={num_features}, eps={eps})'.format(name=
self.__class__.__name__, **self.__dict__)
def forward(self, input_0, input_1, input_2):
primals_4 = self.gamma
primals_5 = self.beta
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2]
| Silent-Zebra/JEM | VirtualBatchNormNN | false | 17,962 | [
"Apache-2.0"
] | 6 | 33440aff8429d9a24a8ba858d0209f4b48be8e05 | https://github.com/Silent-Zebra/JEM/tree/33440aff8429d9a24a8ba858d0209f4b48be8e05 |
GEGLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ew/cewuvmz2l4xsfqeiimduqjmrtfaz7m752evulitjvus4fngicv5u.py
# Topologically Sorted Source Nodes: [gelu, mul], Original ATen: [aten.gelu, aten.mul]
# Source node to ATen node mapping:
# gelu => add, erf, mul, mul_1, mul_2
# mul => mul_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %mul_2), kwargs = {})
triton_poi_fused_gelu_mul_0 = async_compile.triton('triton_poi_fused_gelu_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + (4*x1)), xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865476
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp10 = tmp0 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
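# The erf-based sequence above computes the exact GELU,
# gelu(g) = 0.5 * g * (1 + erf(g / sqrt(2))) with 1/sqrt(2) =
# 0.7071067811865476, applied to the second half of the chunked input
# before the elementwise product with the first half.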
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [gelu, mul], Original ATen: [aten.gelu, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_gelu_mul_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch import nn
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim=-1)
return x * F.gelu(gates)
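def _geglu_usage_sketch():
    # Hypothetical usage sketch (not part of the original module): GEGLU
    # halves the last dimension, gating the first half with the GELU of
    # the second half.
    y = GEGLU()(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 4, 2)
    return y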
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865476
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp10 = tmp0 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_mul_0[grid(128)](arg0_1, buf0, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GEGLUNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| TabbenBenchmark/tabben | GEGLU | false | 17,963 | [
"MIT"
] | 5 | d74114afc4b6f67be488ab6bf8ad6fd316fdb888 | https://github.com/TabbenBenchmark/tabben/tree/d74114afc4b6f67be488ab6bf8ad6fd316fdb888 |
Conv3x3 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/tt/ctthexrcgmoykvsyasq7xirwxi6m3yxgjocmuvarikaawgqvdiws.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# out => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
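# The load index 15 + -1*abs(-3 + abs(-1 + x0)) + -4*abs(-3 + abs(-1 + x1))
# + 16*x2 folds ReflectionPad2d(1) into the gather: the nested abs() terms
# reflect out-of-range row/column coordinates back into the 4x4 input plane.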
# kernel path: runs/run_shard_2/inductor_cache/7r/c7r2u57hr54idc3of6lw2ouxuoyy44tzonl7cy4k7awnnjece2kt.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Conv3x3(nn.Module):
"""Layer to pad and convolve input
"""
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv3x3, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(1)
else:
self.pad = nn.ZeroPad2d(1)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)
def forward(self, x):
out = self.pad(x)
out = self.conv(out)
return out
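def _conv3x3_usage_sketch():
    # Hypothetical usage sketch (not part of the original module): the
    # 1-pixel pad followed by a 3x3 convolution preserves the spatial
    # resolution of the input.
    layer = Conv3x3(in_channels=4, out_channels=4)
    y = layer(torch.rand(4, 4, 8, 8))
    assert y.shape == (4, 4, 8, 8)
    return y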
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class Conv3x3New(nn.Module):
"""Layer to pad and convolve input
"""
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv3x3New, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(1)
else:
self.pad = nn.ZeroPad2d(1)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Sid1057/sid1057.github.io | Conv3x3 | false | 17,964 | [
"MIT"
] | 4 | 623d1731e308b42b6f86304dcfd671a061b414bf | https://github.com/Sid1057/sid1057.github.io/tree/623d1731e308b42b6f86304dcfd671a061b414bf |
ReinforcedReceiver | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/xc/cxcdvke2ki35o7rw2u6k5kcduvbkx2m7bjfyiqmyun6zjsxvgwpr.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%addmm, %primals_4], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (8*x1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/wc/cwcn4yxvwmndgcgq5j2m4o5e2x3x4ski3ymlrk3juc5y5uci3bcl.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_2 => gt, mul, where
# Graph fragment:
# %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_6), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/66/c662wpsh32pb37lzoucnrtlqlpzvmkexbeerttlo5tv4ivk2pqy2.py
# Topologically Sorted Source Nodes: [probs], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# probs => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_8), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (8, 8), (8, 1))
assert_size_stride(primals_6, (8, ), (1, ))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf0 = reinterpret_tensor(buf2, (4, 4), (8, 1), 0) # alias
# Topologically Sorted Source Nodes: [embedded_bits], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = reinterpret_tensor(buf2, (4, 4), (8, 1), 4) # alias
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_4, buf1, 16, grid=grid(16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (8, 8), (1, 8), 0), out=buf3)
buf4 = empty_strided_cuda((4, 8), (8, 1), torch.bool)
buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf3, primals_6, buf4, buf5, 32, grid=grid(32), stream=stream0)
del buf3
del primals_6
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [probs], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf7, primals_8, 16, grid=grid(16), stream=stream0)
del primals_8
return (buf7, buf7, primals_1, buf2, buf4, buf5, buf7, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
from torch.distributions import Bernoulli
import torch.distributions
class ReinforcedReceiver(nn.Module):
def __init__(self, n_bits, n_hidden):
super(ReinforcedReceiver, self).__init__()
self.emb_column = nn.Linear(n_bits, n_hidden)
self.fc1 = nn.Linear(2 * n_hidden, 2 * n_hidden)
self.fc2 = nn.Linear(2 * n_hidden, n_bits)
def forward(self, embedded_message, bits):
embedded_bits = self.emb_column(bits.float())
x = torch.cat([embedded_bits, embedded_message], dim=1)
x = self.fc1(x)
x = F.leaky_relu(x)
x = self.fc2(x)
probs = x.sigmoid()
distr = Bernoulli(probs=probs)
entropy = distr.entropy()
if self.training:
sample = distr.sample()
else:
sample = (probs > 0.5).float()
log_prob = distr.log_prob(sample).sum(dim=1)
return sample, log_prob, entropy
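def _reinforced_receiver_usage_sketch():
    # Hypothetical usage sketch (not part of the original module): in
    # training mode the receiver samples bit vectors from the Bernoulli
    # distribution; in eval mode it thresholds the probabilities at 0.5.
    net = ReinforcedReceiver(n_bits=4, n_hidden=4)
    sample, log_prob, entropy = net(torch.rand(4, 4), torch.randint(0, 2, (4, 4)))
    assert sample.shape == (4, 4) and log_prob.shape == (4,)
    return sample, log_prob, entropy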
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_bits': 4, 'n_hidden': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (8, 8), (8, 1))
assert_size_stride(primals_6, (8,), (1,))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf0 = reinterpret_tensor(buf2, (4, 4), (8, 1), 0)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = reinterpret_tensor(buf2, (4, 4), (8, 1), 4)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(16)](primals_4, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (8, 8), (1, 8
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 8), (8, 1), torch.bool)
buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(32)](buf3, primals_6, buf4, buf5,
32, XBLOCK=32, num_warps=1, num_stages=1)
del buf3
del primals_6
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (8, 4), (1, 8
), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_sigmoid_2[grid(16)](buf7, primals_8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_8
return buf7, buf7, primals_1, buf2, buf4, buf5, buf7, primals_7, primals_5
class ReinforcedReceiverNew(nn.Module):
def __init__(self, n_bits, n_hidden):
super(ReinforcedReceiverNew, self).__init__()
self.emb_column = nn.Linear(n_bits, n_hidden)
self.fc1 = nn.Linear(2 * n_hidden, 2 * n_hidden)
self.fc2 = nn.Linear(2 * n_hidden, n_bits)
def forward(self, input_0, input_1):
primals_1 = self.emb_column.weight
primals_3 = self.emb_column.bias
primals_5 = self.fc1.weight
primals_6 = self.fc1.bias
primals_7 = self.fc2.weight
primals_8 = self.fc2.bias
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1], output[2]
| Slowika/GameBias-EmeCom2020 | ReinforcedReceiver | false | 17,965 | [
"MIT"
] | 5 | 5b94c47559f8202bca99c26fc1bcb078dd0509a6 | https://github.com/Slowika/GameBias-EmeCom2020/tree/5b94c47559f8202bca99c26fc1bcb078dd0509a6 |
VonmisesLossBiternion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/nw/cnw3wqm5pzsb7d7jlkmtgvb7a3n24o6ac66ng5oxbhdufyfnouez.py
# Topologically Sorted Source Nodes: [sub, mul, cos_angles_1, score], Original ATen: [aten.sub, aten.mul, aten.exp, aten.rsub]
# Source node to ATen node mapping:
# cos_angles_1 => exp
# mul => mul
# score => sub_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 4), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp), kwargs = {})
triton_poi_fused_exp_mul_rsub_sub_0 = async_compile.triton('triton_poi_fused_exp_mul_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mul_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_mul_rsub_sub_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp5
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [cos_angles], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(arg1_1, (4, 4, 1), (4, 1, 1), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, mul, cos_angles_1, score], Original ATen: [aten.sub, aten.mul, aten.exp, aten.rsub]
stream0 = get_raw_stream(0)
triton_poi_fused_exp_mul_rsub_sub_0.run(buf1, 4, grid=grid(4), stream=stream0)
return (reinterpret_tensor(buf1, (4, ), (1, ), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class VonmisesLossBiternion(torch.nn.Module):
"""Von mises loss function for biternion inputs
see: Beyer et al.: Biternion Nets: Continuous Head Pose Regression from
Discrete Training Labels, GCPR 2015.
"""
def __init__(self, kappa):
super(VonmisesLossBiternion, self).__init__()
self._kappa = kappa
def forward(self, prediction, target):
cos_angles = torch.bmm(prediction[..., None].permute(0, 2, 1),
target[..., None])
cos_angles = torch.exp(self._kappa * (cos_angles - 1))
score = 1 - cos_angles
return score[:, 0, 0]
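def _vonmises_usage_sketch():
    # Hypothetical usage sketch (not part of the original module): biternion
    # inputs are unit vectors (cos(theta), sin(theta)); when prediction
    # equals target the inner product is 1 and the loss is (numerically)
    # zero.
    theta = torch.rand(4) * 6.2832  # ~2*pi
    biternion = torch.stack([theta.cos(), theta.sin()], dim=1)
    return VonmisesLossBiternion(kappa=4)(biternion, biternion)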
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'kappa': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_exp_mul_rsub_sub_0(in_out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp5
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 1, 4), (4, 1, 1),
0), reinterpret_tensor(arg1_1, (4, 4, 1), (4, 1, 1), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_exp_mul_rsub_sub_0[grid(4)](buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
return reinterpret_tensor(buf1, (4,), (1,), 0),
class VonmisesLossBiternionNew(torch.nn.Module):
"""Von mises loss function for biternion inputs
see: Beyer et al.: Biternion Nets: Continuous Head Pose Regression from
Discrete Training Labels, GCPR 2015.
"""
def __init__(self, kappa):
super(VonmisesLossBiternionNew, self).__init__()
self._kappa = kappa
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| TUI-NICR/multi-task-person-perception | VonmisesLossBiternion | false | 17,966 | [
"BSD-3-Clause"
] | 4 | 81666eb42be9522fd726448e82e8bbf04138ffa3 | https://github.com/TUI-NICR/multi-task-person-perception/tree/81666eb42be9522fd726448e82e8bbf04138ffa3 |
MulScalarNegative | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/2w/c2wdt4kzzibfc24mgf6j2taksrhtpzelw3k66roj4y4kardlsul7.py
# Topologically Sorted Source Nodes: [r], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# r => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, -0.3), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -0.3
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [r], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.quantization import QuantStub
from torch.quantization import DeQuantStub
class MulScalarNegative(nn.Module):
def __init__(self):
super().__init__()
self.float_op = nn.quantized.FloatFunctional()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
mul = self.float_op.mul_scalar(x, -0.3)
return self.dequant(mul)
def fuse_model(self):
pass
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
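# Illustrative check (assumption: the module runs in float mode, where
# QuantStub and DeQuantStub act as identities and FloatFunctional.mul_scalar
# is a plain multiply), so the whole forward reduces to x * -0.3:
def _example_mul_scalar_negative():
    m = MulScalarNegative()
    x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(m(x), x * -0.3)
    return m(x)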
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.quantization import QuantStub
from torch.quantization import DeQuantStub
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -0.3
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MulScalarNegativeNew(nn.Module):
def __init__(self):
super().__init__()
self.float_op = nn.quantized.FloatFunctional()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def fuse_model(self):
pass
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| T-head-Semi/tvm | MulScalarNegative | false | 17,967 | [
"Apache-2.0"
] | 4 | c1b8e06685c92fb7cacbe989e147b0622aee4503 | https://github.com/T-head-Semi/tvm/tree/c1b8e06685c92fb7cacbe989e147b0622aee4503 |
InformedSender | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/lj/cljoh642mrhblbqz4ipwrm2lotsbebgxx7pbgrugnexl2ewfvl4y.py
# Topologically Sorted Source Nodes: [h_4], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# h_4 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_1, %unsqueeze_3, %unsqueeze_5, %unsqueeze_7], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (4*x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + (4*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + (x0 + (4*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + (x3), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/65/c65iltr2pquzye7migngokhhtu5qf7ejrjl4tw7zz7m5pjbserj7.py
# Topologically Sorted Source Nodes: [h_6], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# h_6 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/kf/ckfojkadxu3pzjhmlbtcn6s4ufg52wics6gt2nnjnfq7kpsh4knh.py
# Topologically Sorted Source Nodes: [h_9], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# h_9 => sigmoid_1
# Graph fragment:
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vd/cvdcxgajpj4qce4mul4ippu7ofcaej5wcuy7os5qdu26qgoxn6mi.py
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# logits => exp, log, sub_1, sum_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_4, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %log), kwargs = {})
triton_per_fused__log_softmax_3 = async_compile.triton('triton_per_fused__log_softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 100
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (100*x0)), rmask & xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(rmask & xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tl_math.log(tmp13)
tmp15 = tmp8 - tmp14
tl.store(out_ptr2 + (r1 + (100*x0)), tmp15, rmask & xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1, 4, 1), (4, 4, 1, 1))
assert_size_stride(primals_4, (1, 1, 4, 1), (4, 4, 1, 1))
assert_size_stride(primals_5, (100, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_i], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_i_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_i_6], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_i_9], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_4], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, buf1, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0)
del buf0
del buf1
del buf2
del buf3
# Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_3, stride=(4, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 4), (16, 4, 4, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [h_6], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf6, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [h_8], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (4, 1, 4, 4), (16, 4, 4, 1), 0), primals_4, stride=(4, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 1, 4), (4, 4, 4, 1))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [h_9], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf8, 16, grid=grid(16), stream=stream0)
buf9 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_12], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 100), (1, 4), 0), out=buf9)
buf12 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_3.run(buf9, buf12, 4, 100, grid=grid(4), stream=stream0)
del buf9
return (buf12, primals_3, primals_4, reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), buf4, buf6, buf8, buf12, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 4, 1), (4, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 1, 4, 1), (4, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class InformedSender(nn.Module):
def __init__(self, game_size, feat_size, embedding_size, hidden_size,
vocab_size=100, temp=1.0):
super(InformedSender, self).__init__()
self.game_size = game_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.temp = temp
self.lin1 = nn.Linear(feat_size, embedding_size, bias=False)
self.conv2 = nn.Conv2d(1, hidden_size, kernel_size=(game_size, 1),
stride=(game_size, 1), bias=False)
self.conv3 = nn.Conv2d(1, 1, kernel_size=(hidden_size, 1), stride=(hidden_size, 1), bias=False)
self.lin4 = nn.Linear(embedding_size, vocab_size, bias=False)
def forward(self, x, return_embeddings=False):
emb = self.return_embeddings(x)
h = self.conv2(emb)
h = torch.sigmoid(h)
h = h.transpose(1, 2)
h = self.conv3(h)
h = torch.sigmoid(h)
h = h.squeeze(dim=1)
h = h.squeeze(dim=1)
h = self.lin4(h)
h = h.mul(1.0 / self.temp)
logits = F.log_softmax(h, dim=1)
return logits
def return_embeddings(self, x):
embs = []
for i in range(self.game_size):
h = x[i]
if len(h.size()) == 3:
h = h.squeeze(dim=-1)
h_i = self.lin1(h)
h_i = h_i.unsqueeze(dim=1)
h_i = h_i.unsqueeze(dim=1)
embs.append(h_i)
h = torch.cat(embs, dim=2)
return h
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'game_size': 4, 'feat_size': 4, 'embedding_size': 4,
'hidden_size': 4}]
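# Illustrative usage sketch (helper name is hypothetical, not from the
# original repo): the sender maps a stack of game_size feature vectors to a
# log-probability distribution over the vocabulary, so exp(logits) sums to 1
# along dim 1.
def _example_informed_sender():
    sender = InformedSender(game_size=4, feat_size=4, embedding_size=4,
                            hidden_size=4)
    x = torch.rand(4, 4, 4)  # (game_size, batch, feat_size)
    logits = sender(x)       # (batch, vocab_size=100) log-probabilities
    assert logits.shape == (4, 100)
    return logits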
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * x2), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + (x0 + 4 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x3, tmp22, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 100
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 100 * x0), rmask & xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(rmask & xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tl_math.log(tmp13)
tmp15 = tmp8 - tmp14
tl.store(out_ptr2 + (r1 + 100 * x0), tmp15, rmask & xmask)
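# Hedged note (added commentary, not Inductor output): the kernel above uses
# the standard max-subtraction (log-sum-exp) trick for numerical stability;
# the multiplies by 1.0 come from the temperature of 1.0 and are no-ops. A
# plain PyTorch equivalent of what it stores per row:
def _reference_log_softmax(x):
    shifted = x - x.max(dim=1, keepdim=True).values
    return shifted - shifted.exp().sum(dim=1, keepdim=True).log()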
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1, 4, 1), (4, 4, 1, 1))
assert_size_stride(primals_4, (1, 1, 4, 1), (4, 4, 1, 1))
assert_size_stride(primals_5, (100, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(64)](buf0, buf1, buf2, buf3, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del buf1
del buf2
del buf3
buf5 = extern_kernels.convolution(buf4, primals_3, stride=(4, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 4), (16, 4, 4, 1))
buf6 = buf5
del buf5
triton_poi_fused_sigmoid_1[grid(64)](buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (4, 1, 4,
4), (16, 4, 4, 1), 0), primals_4, stride=(4, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 1, 4), (4, 4, 4, 1))
buf8 = buf7
del buf7
triton_poi_fused_sigmoid_2[grid(16)](buf8, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 100), (1, 4), 0), out=buf9)
buf12 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
triton_per_fused__log_softmax_3[grid(4)](buf9, buf12, 4, 100,
XBLOCK=1, num_warps=2, num_stages=1)
del buf9
return buf12, primals_3, primals_4, reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), buf4, buf6, buf8, buf12, primals_5
class InformedSenderNew(nn.Module):
def __init__(self, game_size, feat_size, embedding_size, hidden_size,
vocab_size=100, temp=1.0):
super(InformedSenderNew, self).__init__()
self.game_size = game_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.temp = temp
self.lin1 = nn.Linear(feat_size, embedding_size, bias=False)
self.conv2 = nn.Conv2d(1, hidden_size, kernel_size=(game_size, 1),
stride=(game_size, 1), bias=False)
self.conv3 = nn.Conv2d(1, 1, kernel_size=(hidden_size, 1), stride=(hidden_size, 1), bias=False)
self.lin4 = nn.Linear(embedding_size, vocab_size, bias=False)
def return_embeddings(self, x):
embs = []
for i in range(self.game_size):
h = x[i]
if len(h.size()) == 3:
h = h.squeeze(dim=-1)
h_i = self.lin1(h)
h_i = h_i.unsqueeze(dim=1)
h_i = h_i.unsqueeze(dim=1)
embs.append(h_i)
h = torch.cat(embs, dim=2)
return h
def forward(self, input_0):
primals_2 = self.lin1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.lin4.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Slowika/GameBias-EmeCom2020 | InformedSender | false | 17,968 | [
"MIT"
] | 5 | 5b94c47559f8202bca99c26fc1bcb078dd0509a6 | https://github.com/Slowika/GameBias-EmeCom2020/tree/5b94c47559f8202bca99c26fc1bcb078dd0509a6 |
UpsamplingBilinear | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/cj/ccjwhzrjpovo2dm2l2dp7owcqvxz4tygdgqlonl3ej4up2jxespf.py
# Topologically Sorted Source Nodes: [upsample], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
# Source node to ATen node mapping:
# upsample => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub, sub_1, sub_2, sub_3, sub_4
# Graph fragment:
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_2, 0.42857142857142855), kwargs = {})
# %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_1, 0.0), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = 1.0
tmp25 = triton_helpers.minimum(tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp16 + tmp26
tmp28 = tl.load(in_ptr0 + (tmp15 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (tmp18 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
tmp30 = tmp29 - tmp28
tmp31 = tmp30 * tmp25
tmp32 = tmp28 + tmp31
tmp33 = tmp27 - tmp32
tmp34 = tmp6.to(tl.float32)
tmp35 = tmp5 - tmp34
tmp36 = triton_helpers.maximum(tmp35, tmp4)
tmp37 = triton_helpers.minimum(tmp36, tmp24)
tmp38 = tmp33 * tmp37
tmp39 = tmp32 + tmp38
tl.store(in_out_ptr0 + (x4), tmp39, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [upsample], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf1, arg0_1, 1024, grid=grid(1024), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.quantization import QuantStub
from torch.quantization import DeQuantStub
class UpsamplingBilinear(nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
upsample = nn.functional.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
return self.dequant(upsample)
def fuse_model(self):
pass
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
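# Illustrative check (assumption: float mode, so the quant stubs are
# identities): bilinear upsampling with scale_factor=2 doubles both spatial
# dimensions.
def _example_upsampling_bilinear():
    m = UpsamplingBilinear()
    x = torch.rand(4, 4, 4, 4)
    y = m(x)
    assert y.shape == (4, 4, 8, 8)
    return y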
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.quantization import QuantStub
from torch.quantization import DeQuantStub
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = 1.0
tmp25 = triton_helpers.minimum(tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp16 + tmp26
tmp28 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp30 = tmp29 - tmp28
tmp31 = tmp30 * tmp25
tmp32 = tmp28 + tmp31
tmp33 = tmp27 - tmp32
tmp34 = tmp6.to(tl.float32)
tmp35 = tmp5 - tmp34
tmp36 = triton_helpers.maximum(tmp35, tmp4)
tmp37 = triton_helpers.minimum(tmp36, tmp24)
tmp38 = tmp33 * tmp37
tmp39 = tmp32 + tmp38
tl.store(in_out_ptr0 + x4, tmp39, xmask)
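# Hedged note (added commentary, not Inductor output): the constant
# 0.42857142857142855 used above is (in_size - 1) / (out_size - 1) = 3 / 7
# for this 4 -> 8 upsample with align_corners=True; each output index is
# multiplied by it to locate its source coordinate before the two-axis
# linear interpolation, e.g. (4 - 1) / (8 - 1) == 0.42857142857142855.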
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(1024)](buf1, arg0_1, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf1,
class UpsamplingBilinearNew(nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def fuse_model(self):
pass
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| T-head-Semi/tvm | UpsamplingBilinear | false | 17,969 | [
"Apache-2.0"
] | 4 | c1b8e06685c92fb7cacbe989e147b0622aee4503 | https://github.com/T-head-Semi/tvm/tree/c1b8e06685c92fb7cacbe989e147b0622aee4503 |
SmallMnistNoDropoutWithPassThrough | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ul/culfeun7wmrsya4fv22he6lp67htodmnmbg4achhwyybqeqe3ljt.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3600) % 10
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/5l/c5lfmsfuebobasgmahlh54mpbwcpvkqq3xtpdx5es3dj6cpvjw77.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => convolution_1
# x_2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 250880
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 3136) % 20
x0 = xindex % 3136
x3 = (xindex // 3136)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x0 + (3200*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ov/covxr2juktxjwdz3javvk6wtjtgx3iticwwsytpotrpg5czkgast.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 39200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/i7/ci7jltkvpyv54u2b2qwcmikhqlfg7bmual2jgwjmrckawmdx6w76.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_3 = async_compile.triton('triton_per_fused__log_softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1024, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 784
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (10, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (20, ), (1, ))
assert_size_stride(primals_6, (50, 320), (320, 1))
assert_size_stride(primals_7, (50, ), (1, ))
assert_size_stride(primals_8, (10, 50), (50, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 144000, grid=grid(144000), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 20, 56, 56), (62720, 3136, 56, 1))
buf3 = buf2; del buf2 # reuse
buf10 = empty_strided_cuda((4, 20, 56, 56), (64000, 3200, 56, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf3, primals_5, buf10, 250880, grid=grid(250880), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((784, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (784, 320), (320, 1), 0), reinterpret_tensor(primals_6, (320, 50), (1, 320), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf5, primals_7, 39200, grid=grid(39200), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (50, 10), (1, 50), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf9 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_3.run(buf6, buf9, 784, 10, grid=grid(784), stream=stream0)
del buf6
return (buf9, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf3, (784, 320), (320, 1), 0), buf5, buf9, primals_8, primals_6, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((10, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((50, 320), (320, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
class PassThroughOp(torch.nn.Module):
"""
    This is a pass-through op, used for the purpose of making an op a no-op
"""
def forward(self, inputx):
return inputx
class SmallMnistNoDropoutWithPassThrough(nn.Module):
def __init__(self):
super(SmallMnistNoDropoutWithPassThrough, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.pt1 = PassThroughOp()
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.pt2 = PassThroughOp()
self.relu2 = nn.ReLU()
self.fc1 = nn.Linear(320, 50)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(50, 10)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, x):
x = self.relu1(self.pt1(self.conv1(x)))
x = self.conv2(x)
x = self.relu2(self.pt2(x))
x = x.view(-1, 320)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.log_softmax(x)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
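# A quick shape check, sketched from the tensor asserts above (not part of the
# original row): with the 4x1x64x64 input from get_inputs(), two valid 5x5
# convolutions take 64 -> 60 -> 56, so x.view(-1, 320) folds batch and spatial
# positions together into 784 rows rather than one row per sample, which is
# why the compiled call() returns a (784, 10) log-softmax buffer.
batch, channels, side = 4, 20, 64 - 4 - 4
assert batch * channels * side * side // 320 == 784  # matches buf9's row count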
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
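# Fused epilogue for conv1: adds the per-channel bias and applies the first
# ReLU in place. The channel index is xindex // 3600 % 10, since each of the
# 10 channels covers 3600 = 60 * 60 spatial positions of the (4, 10, 60, 60)
# convolution output.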
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
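# Same bias + ReLU fusion for conv2, plus the boolean (activation <= 0) mask
# that autograd's threshold_backward consumes. The mask is stored with a
# padded stride of 3200 per channel slice (vs. 3136 = 56 * 56 live elements)
# to match the (64000, 3200, 56, 1) layout allocated for buf10.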
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 250880
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 3136 % 20
x0 = xindex % 3136
x3 = xindex // 3136
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x0 + 3200 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 39200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
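# Row-wise numerically stable log-softmax over the 10 classes: each program
# handles one of the 784 rows, subtracts the row max before exponentiating,
# and applies log_softmax(x) = (x - m) - log(sum(exp(x - m))) with m = max(x),
# so the exp never overflows for large logits.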
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 784
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (50, 320), (320, 1))
assert_size_stride(primals_7, (50,), (1,))
assert_size_stride(primals_8, (10, 50), (50, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(144000)](buf1, primals_2,
144000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 20, 56, 56), (62720, 3136, 56, 1))
buf3 = buf2
del buf2
buf10 = empty_strided_cuda((4, 20, 56, 56), (64000, 3200, 56, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(250880)](
buf3, primals_5, buf10, 250880, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_5
buf4 = empty_strided_cuda((784, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (784, 320), (320, 1), 0),
reinterpret_tensor(primals_6, (320, 50), (1, 320), 0), out=buf4)
buf5 = buf4
del buf4
    triton_poi_fused_relu_2[grid(39200)](buf5, primals_7, 39200,
        XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8,
(50, 10), (1, 50), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf9 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_3[grid(784)](buf6, buf9, 784, 10,
XBLOCK=32, num_warps=4, num_stages=1)
del buf6
    return (buf9, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(
        buf3, (784, 320), (320, 1), 0), buf5, buf9, primals_8, primals_6, buf10)
class PassThroughOp(torch.nn.Module):
"""
    This is a pass-through op, used for the purpose of making an op a no-op
"""
def forward(self, inputx):
return inputx
class SmallMnistNoDropoutWithPassThroughNew(nn.Module):
def __init__(self):
super(SmallMnistNoDropoutWithPassThroughNew, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.pt1 = PassThroughOp()
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.pt2 = PassThroughOp()
self.relu2 = nn.ReLU()
self.fc1 = nn.Linear(320, 50)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(50, 10)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
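# A minimal usage sketch for the wrapper class above; the CUDA guard and the
# example below are illustrative assumptions, not part of the original module.
if torch.cuda.is_available():
    model = SmallMnistNoDropoutWithPassThroughNew().cuda()
    out = model(torch.rand(4, 1, 64, 64, device='cuda'))
    print(out.shape)  # torch.Size([784, 10]), per the view(-1, 320) note above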
| Rohan-Chaudhury/aimet | SmallMnistNoDropoutWithPassThrough | false | 17,970 | [
"BSD-3-Clause"
] | 3 | 1c38cac8cc0fd32dca40ce5e39940805d29f7a4a | https://github.com/Rohan-Chaudhury/aimet/tree/1c38cac8cc0fd32dca40ce5e39940805d29f7a4a |
CovSepBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/3r/c3runacu4pkgvdlmsngxxodg4pf6xmzvzxpf7xzbkjc3ay27rdj3.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
return (buf2, primals_1, primals_2, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as M
def DepthwiseConv(in_channels, kernel_size, stride, padding):
return M.Conv2d(in_channels=in_channels, out_channels=in_channels,
kernel_size=kernel_size, stride=stride, padding=padding, groups=
in_channels, bias=False)
def PointwiseConv(in_channels, out_channels):
return M.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, padding=0, bias=True)
class CovSepBlock(M.Module):
def __init__(self, in_channels, out_channels, kernel_size=5, stride=1,
padding=2):
super().__init__()
self.dc = DepthwiseConv(in_channels, kernel_size, stride=stride,
padding=padding)
self.pc = PointwiseConv(in_channels, out_channels)
def forward(self, x):
x = self.dc(x)
x = self.pc(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
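# A hedged parameter-count sketch (numbers follow from the definitions above
# with in_channels = out_channels = 4 and kernel_size = 5; the dense baseline
# is a hypothetical comparison, not something the module builds): the
# depthwise plus pointwise pair is far cheaper than a dense convolution.
cin, cout, k = 4, 4, 5
dense = M.Conv2d(cin, cout, k)                      # hypothetical dense baseline
dw = M.Conv2d(cin, cin, k, groups=cin, bias=False)  # mirrors DepthwiseConv
pw = M.Conv2d(cin, cout, 1)                         # mirrors PointwiseConv
n_params = lambda m: sum(p.numel() for p in m.parameters())
print(n_params(dense), n_params(dw) + n_params(pw))  # 404 vs 120 parameters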
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as M
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
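# Bias epilogue for the 1x1 pointwise convolution: extern_kernels.convolution
# runs with bias=None, so this kernel adds pc.bias afterwards; the channel is
# recovered as xindex // 16 % 4 (16 = 4 * 4 spatial positions per channel).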
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf2, primals_4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
return buf2, primals_1, primals_2, primals_3, buf0
def DepthwiseConv(in_channels, kernel_size, stride, padding):
return M.Conv2d(in_channels=in_channels, out_channels=in_channels,
kernel_size=kernel_size, stride=stride, padding=padding, groups=
in_channels, bias=False)
def PointwiseConv(in_channels, out_channels):
return M.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, padding=0, bias=True)
class CovSepBlockNew(M.Module):
def __init__(self, in_channels, out_channels, kernel_size=5, stride=1,
padding=2):
super().__init__()
self.dc = DepthwiseConv(in_channels, kernel_size, stride=stride,
padding=padding)
self.pc = PointwiseConv(in_channels, out_channels)
def forward(self, input_0):
primals_1 = self.dc.weight
primals_3 = self.pc.weight
primals_4 = self.pc.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| SuperbTUM/RAW-image-denoising | CovSepBlock | false | 17,971 | [
"MIT"
] | 4 | 9f81be8da6a576f641022707d98b8c37f5c599ab | https://github.com/SuperbTUM/RAW-image-denoising/tree/9f81be8da6a576f641022707d98b8c37f5c599ab |
Upsample | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/cj/ccjwhzrjpovo2dm2l2dp7owcqvxz4tygdgqlonl3ej4up2jxespf.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, clamp_max_2, clamp_max_3, clamp_min, clamp_min_2, clamp_min_3, convert_element_type, convert_element_type_1, convert_element_type_3, iota, mul, mul_2, mul_3, mul_4, sub, sub_1, sub_2, sub_3, sub_4
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 0.42857142857142855), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0.0), kwargs = {})
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min, torch.int64), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = 1.0
tmp25 = triton_helpers.minimum(tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp16 + tmp26
tmp28 = tl.load(in_ptr0 + (tmp15 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (tmp18 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
tmp30 = tmp29 - tmp28
tmp31 = tmp30 * tmp25
tmp32 = tmp28 + tmp31
tmp33 = tmp27 - tmp32
tmp34 = tmp6.to(tl.float32)
tmp35 = tmp5 - tmp34
tmp36 = triton_helpers.maximum(tmp35, tmp4)
tmp37 = triton_helpers.minimum(tmp36, tmp24)
tmp38 = tmp33 * tmp37
tmp39 = tmp32 + tmp38
tl.store(in_out_ptr0 + (x4), tmp39, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/fr/cfr26uorpdbqjhhrw25kipixbtj7e3p2iw55hdsrscl44kvrdeyp.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_4, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf1, primals_1, 1024, grid=grid(1024), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 8, 8), (256, 64, 8, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_3, 1024, grid=grid(1024), stream=stream0)
del primals_3
return (buf3, primals_2, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as M
class Upsample(M.Module):
def __init__(self, in_channels, out_channels):
super(Upsample, self).__init__()
self.upsample = M.Upsample(scale_factor=2, mode='bilinear',
align_corners=True)
self.ordinaryConv = M.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=1)
def forward(self, x):
x = self.upsample(x)
x = self.ordinaryConv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
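# The constant 0.42857142857142855 hard-coded in the fused kernel shown
# earlier is the align_corners=True coordinate scale for this 4 -> 8 upsample;
# a short sketch of the derivation (assuming the 4x4 input from get_inputs()):
in_size, out_size = 4, 8
scale = (in_size - 1) / (out_size - 1)  # source step between output pixels
print(scale)  # 0.42857142857142855, i.e. 3/7
# each output index i samples the input at i * scale and lerps the two nearest
# rows/columns, using the fractional part of the coordinate as the blend weight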
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as M
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
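# Fused bilinear 2x upsample with align_corners=True: each 8x8 output pixel
# maps to the fractional source coordinate i * 0.42857142857142855 (= 3/7 =
# (4 - 1) / (8 - 1)), gathers the four neighbouring input pixels, and lerps
# horizontally then vertically, clamping indices to the 0..3 input range.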
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = 1.0
tmp25 = triton_helpers.minimum(tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp16 + tmp26
tmp28 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp30 = tmp29 - tmp28
tmp31 = tmp30 * tmp25
tmp32 = tmp28 + tmp31
tmp33 = tmp27 - tmp32
tmp34 = tmp6.to(tl.float32)
tmp35 = tmp5 - tmp34
tmp36 = triton_helpers.maximum(tmp35, tmp4)
tmp37 = triton_helpers.minimum(tmp36, tmp24)
tmp38 = tmp33 * tmp37
tmp39 = tmp32 + tmp38
tl.store(in_out_ptr0 + x4, tmp39, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
    triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid(1024)](
        buf1, primals_1, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 8, 8), (256, 64, 8, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(1024)](buf3, primals_3, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf3, primals_2, buf1
class UpsampleNew(M.Module):
def __init__(self, in_channels, out_channels):
super(UpsampleNew, self).__init__()
self.upsample = M.Upsample(scale_factor=2, mode='bilinear',
align_corners=True)
self.ordinaryConv = M.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=1)
def forward(self, input_0):
primals_2 = self.ordinaryConv.weight
primals_3 = self.ordinaryConv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| SuperbTUM/RAW-image-denoising | Upsample | false | 17,972 | [
"MIT"
] | 4 | 9f81be8da6a576f641022707d98b8c37f5c599ab | https://github.com/SuperbTUM/RAW-image-denoising/tree/9f81be8da6a576f641022707d98b8c37f5c599ab |
DownSample | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/wi/cwiqbpoqi6m2vpqbo7yhlu6k5kw2cosffol744o35qhjqcmj57rw.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_1 => convolution_1
# x_2 => relu
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/hh/chh2vels25xtylq5bzfebl6eoopomnq7wro6ud6rt4mdtjm7q6ah.py
# Topologically Sorted Source Nodes: [x_4, x_6, x_7, x_8], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_4 => convolution_3
# x_6 => convolution_5
# x_7 => add
# x_8 => relu_1
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_4, %primals_9, %primals_10, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_3, %convolution_5), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = 0.0
tmp10 = tmp8 <= tmp9
tl.store(in_out_ptr0 + (x3), tmp8, xmask)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (1, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_6, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 2, 2), (4, 4, 2, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 2, 2), (4, 4, 2, 1))
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 2, 2), (16, 4, 2, 1))
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_1, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf5, (4, 4, 2, 2), (16, 4, 2, 1))
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 2, 2), (16, 4, 2, 1))
buf7 = buf4; del buf4 # reuse
buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_4, x_6, x_7, x_8], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_convolution_relu_threshold_backward_1.run(buf7, primals_7, buf6, primals_10, buf8, 64, grid=grid(64), stream=stream0)
del buf6
del primals_10
del primals_7
return (buf7, primals_1, primals_2, primals_3, primals_5, primals_6, primals_8, primals_9, buf0, buf2, buf3, buf5, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as M
def DepthwiseConv(in_channels, kernel_size, stride, padding):
return M.Conv2d(in_channels=in_channels, out_channels=in_channels,
kernel_size=kernel_size, stride=stride, padding=padding, groups=
in_channels, bias=False)
def PointwiseConv(in_channels, out_channels):
return M.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, padding=0, bias=True)
class CovSepBlock(M.Module):
def __init__(self, in_channels, out_channels, kernel_size=5, stride=1,
padding=2):
super().__init__()
self.dc = DepthwiseConv(in_channels, kernel_size, stride=stride,
padding=padding)
self.pc = PointwiseConv(in_channels, out_channels)
def forward(self, x):
x = self.dc(x)
x = self.pc(x)
return x
class DownSample(M.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.ordinaryConv1 = CovSepBlock(in_channels=in_channels,
out_channels=out_channels // 4, stride=2)
self.activate = M.ReLU(inplace=True)
self.ordinaryConv2 = CovSepBlock(in_channels=out_channels // 4,
out_channels=out_channels)
self.skipconnect = CovSepBlock(in_channels=in_channels,
out_channels=out_channels, kernel_size=3, stride=2, padding=1)
self.activate2 = M.ReLU(inplace=True)
def forward(self, x):
branch = x
x = self.ordinaryConv1(x)
x = self.activate(x)
x = self.ordinaryConv2(x)
branch = self.skipconnect(branch)
x += branch
x = self.activate2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
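# Shape-wise, both branches of this residual block halve the 4x4 input so the
# add lines up; a small sketch using the standard convolution output formula
# (derived from the layer definitions above, not from the original repo):
for k, p in ((5, 2), (3, 1)):  # main-path depthwise vs. skip-path depthwise
    out = (4 + 2 * p - k) // 2 + 1  # stride-2 output size
    print(k, out)  # both paths yield 2, so x += branch is well-formed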
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as M
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
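# Bias + ReLU epilogue for ordinaryConv1's pointwise stage; its output width
# is out_channels // 4 = 1, so the bias is a single scalar and is broadcast
# from in_ptr0[0] across the whole 16-element buffer.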
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
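# Joint epilogue for the residual join: adds the pointwise biases of the main
# path (in_ptr0) and the skip path (in_ptr2) to their respective conv outputs,
# sums the two branches, applies the final ReLU in place, and records the
# (result <= 0) mask consumed by threshold_backward in the backward pass.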
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = 0.0
tmp10 = tmp8 <= tmp9
tl.store(in_out_ptr0 + x3, tmp8, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (1, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_6, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2,
2), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 2, 2), (4, 4, 2, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(16)](buf2, primals_4, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_4
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 2, 2), (4, 4, 2, 1))
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 2, 2), (16, 4, 2, 1))
buf5 = extern_kernels.convolution(primals_1, primals_8, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf5, (4, 4, 2, 2), (16, 4, 2, 1))
buf6 = extern_kernels.convolution(buf5, primals_9, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 2, 2), (16, 4, 2, 1))
buf7 = buf4
del buf4
buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(64)](
buf7, primals_7, buf6, primals_10, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf6
del primals_10
del primals_7
return (buf7, primals_1, primals_2, primals_3, primals_5, primals_6,
primals_8, primals_9, buf0, buf2, buf3, buf5, buf8)
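# The first returned tensor (buf7) is the module output; the remaining tensors
# are weights and intermediates kept alive for the backward pass.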
def DepthwiseConv(in_channels, kernel_size, stride, padding):
return M.Conv2d(in_channels=in_channels, out_channels=in_channels,
kernel_size=kernel_size, stride=stride, padding=padding, groups=
in_channels, bias=False)
def PointwiseConv(in_channels, out_channels):
return M.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, padding=0, bias=True)
class CovSepBlock(M.Module):
def __init__(self, in_channels, out_channels, kernel_size=5, stride=1,
padding=2):
super().__init__()
self.dc = DepthwiseConv(in_channels, kernel_size, stride=stride,
padding=padding)
self.pc = PointwiseConv(in_channels, out_channels)
def forward(self, x):
x = self.dc(x)
x = self.pc(x)
return x
class DownSampleNew(M.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.ordinaryConv1 = CovSepBlock(in_channels=in_channels,
out_channels=out_channels // 4, stride=2)
self.activate = M.ReLU(inplace=True)
self.ordinaryConv2 = CovSepBlock(in_channels=out_channels // 4,
out_channels=out_channels)
self.skipconnect = CovSepBlock(in_channels=in_channels,
out_channels=out_channels, kernel_size=3, stride=2, padding=1)
self.activate2 = M.ReLU(inplace=True)
def forward(self, input_0):
primals_2 = self.ordinaryConv1.dc.weight
primals_3 = self.ordinaryConv1.pc.weight
primals_4 = self.ordinaryConv1.pc.bias
primals_5 = self.ordinaryConv2.dc.weight
primals_6 = self.ordinaryConv2.pc.weight
primals_7 = self.ordinaryConv2.pc.bias
primals_8 = self.skipconnect.dc.weight
primals_9 = self.skipconnect.pc.weight
primals_10 = self.skipconnect.pc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| SuperbTUM/RAW-image-denoising | DownSample | false | 17,973 | [
"MIT"
] | 4 | 9f81be8da6a576f641022707d98b8c37f5c599ab | https://github.com/SuperbTUM/RAW-image-denoising/tree/9f81be8da6a576f641022707d98b8c37f5c599ab |
Net_1 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/zr/czr25tlobjiimaa7wlrm22z57yst2xqo7jfhbmp7cbxyaeiaezlj.py
# Topologically Sorted Source Nodes: [conv1d, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv1d => convolution
# x_1 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_2, %primals_3, [1], [4], [1], False, [0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.05), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 25
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.05
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/v7/cv7fxq22rpu4rzl5vzkgfwkejlicgtdw3vo74ozvphdpl2hkghiv.py
# Topologically Sorted Source Nodes: [conv1d_1, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv1d_1 => convolution_1
# x_2 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1], [3], [1], False, [0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.05), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 16
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.05
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/rg/crgdo2b4c7ix5ul3wbvc6ctuis4jc6ic2e26mxvmd4x2pcjsb7j3.py
# Topologically Sorted Source Nodes: [conv1d_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv1d_2 => convolution_2
# x_3 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [1], [3], [1], False, [0], 1), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.05), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 160
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 10
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.05
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/co/ccof3jchk7qpebajwtnpxrgwh6xgt6rxyxlfoycsfbffhkupabjt.py
# Topologically Sorted Source Nodes: [conv1d_3, x_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv1d_3 => convolution_3
# x_4 => relu
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [1], [0], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (25, 1, 9), (9, 9, 1))
assert_size_stride(primals_3, (25, ), (1, ))
assert_size_stride(primals_4, (16, 25, 7), (175, 7, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (10, 16, 7), (112, 7, 1))
assert_size_stride(primals_7, (10, ), (1, ))
assert_size_stride(primals_8, (1, 10, 1), (10, 1, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), primals_2, stride=(1,), padding=(4,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 25, 4), (100, 4, 1))
buf1 = empty_strided_cuda((4, 25, 4), (100, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 25, 4), (100, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 400, grid=grid(400), stream=stream0)
del buf0
del primals_3
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 4), (64, 4, 1))
buf4 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d_1, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 256, grid=grid(256), stream=stream0)
del buf3
del primals_5
# Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf6, (4, 10, 4), (40, 4, 1))
buf7 = empty_strided_cuda((4, 10, 4), (40, 4, 1), torch.bool)
buf8 = empty_strided_cuda((4, 10, 4), (40, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf6, primals_7, buf7, buf8, 160, grid=grid(160), stream=stream0)
del buf6
del primals_7
# Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf9, (4, 1, 4), (4, 4, 1))
buf10 = buf9; del buf9 # reuse
buf11 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv1d_3, x_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_3.run(buf10, primals_9, buf11, 16, grid=grid(16), stream=stream0)
del primals_9
return (reinterpret_tensor(buf10, (4, 4), (4, 1), 0), primals_2, primals_4, primals_6, primals_8, reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), buf1, buf2, buf4, buf5, buf7, buf8, buf11, )
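# reinterpret_tensor hands the (4, 1, 4) result back as a (4, 4) view, mirroring
# the squeeze(1) in the original forward; the trailing tensors are saved for backward.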
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((25, 1, 9), (9, 9, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((25, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 25, 7), (175, 7, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((10, 16, 7), (112, 7, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 10, 1), (10, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class Net_1(nn.Module):
def __init__(self):
super(Net_1, self).__init__()
self.conv1 = nn.Conv1d(1, 25, 9, padding=4)
self.conv2 = nn.Conv1d(25, 16, 7, padding=3)
self.conv3 = nn.Conv1d(16, 10, 7, padding=3)
self.conv4 = nn.Conv1d(10, 1, 1)
def forward(self, x):
leaky_relu = nn.LeakyReLU(0.05)
        nn.Dropout(0.9)  # constructed but never applied, so it has no effect here
x = torch.unsqueeze(x, 1)
x = leaky_relu(self.conv1(x))
x = leaky_relu(self.conv2(x))
x = leaky_relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.squeeze(1)
return x
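# Shape trace for the default (4, 4) input: unsqueeze -> (4, 1, 4); conv1
# (kernel 9, padding 4) keeps length 4 -> (4, 25, 4); conv2 and conv3
# (kernel 7, padding 3) -> (4, 16, 4) and (4, 10, 4); conv4 (1x1) -> (4, 1, 4);
# squeeze -> (4, 4).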
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 25
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.05
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.05
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 160
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 10
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.05
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
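# The three leaky-ReLU kernels above are identical except for the element count
# and the channel modulus (25, 16, 10): each adds the conv bias, stores the
# x > 0 mask, and writes x if x > 0 else 0.05 * x.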
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (25, 1, 9), (9, 9, 1))
assert_size_stride(primals_3, (25,), (1,))
assert_size_stride(primals_4, (16, 25, 7), (175, 7, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (10, 16, 7), (112, 7, 1))
assert_size_stride(primals_7, (10,), (1,))
assert_size_stride(primals_8, (1, 10, 1), (10, 1, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4), (4, 4, 1), 0), primals_2, stride=(1,), padding=(4,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (4, 25, 4), (100, 4, 1))
buf1 = empty_strided_cuda((4, 25, 4), (100, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 25, 4), (100, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(400)](buf0,
primals_3, buf1, buf2, 400, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,),
padding=(3,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 4), (64, 4, 1))
buf4 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf3,
primals_5, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1,),
padding=(3,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf6, (4, 10, 4), (40, 4, 1))
buf7 = empty_strided_cuda((4, 10, 4), (40, 4, 1), torch.bool)
buf8 = empty_strided_cuda((4, 10, 4), (40, 4, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_2[grid(160)](buf6,
primals_7, buf7, buf8, 160, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
del primals_7
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf9, (4, 1, 4), (4, 4, 1))
buf10 = buf9
del buf9
buf11 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(16)](buf10,
primals_9, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_9
return reinterpret_tensor(buf10, (4, 4), (4, 1), 0
), primals_2, primals_4, primals_6, primals_8, reinterpret_tensor(
primals_1, (4, 1, 4), (4, 4, 1), 0
), buf1, buf2, buf4, buf5, buf7, buf8, buf11
class Net_1New(nn.Module):
def __init__(self):
super(Net_1New, self).__init__()
self.conv1 = nn.Conv1d(1, 25, 9, padding=4)
self.conv2 = nn.Conv1d(25, 16, 7, padding=3)
self.conv3 = nn.Conv1d(16, 10, 7, padding=3)
self.conv4 = nn.Conv1d(10, 1, 1)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| TakaraResearch/Signal-Detection-with-Wasserstein-Loss | Net_1 | false | 17,974 | [
"BSD-3-Clause"
] | 9 | f210bd0da7492a72bc204a5517e74ba515b5ad12 | https://github.com/TakaraResearch/Signal-Detection-with-Wasserstein-Loss/tree/f210bd0da7492a72bc204a5517e74ba515b5ad12 |
GCN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/wv/cwvwn4skuzp6hvqejtyxfz5qh5bqj7bnmxjbe4yznbquqqgow5kx.py
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mm_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/2d/c2dizfaekcnz32ufz3uo3z65uz7anm6zf67prosayb2z6w7nyjhu.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mm_3, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mm_3, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ce/ccevra67aphe5dpz3mn5hiaxb3trxtxxtuaongwy4mcgun6uapq7.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
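# Kernels 1 and 2 together implement a numerically stable log-softmax:
# log_softmax(x)_i = (x_i - max(x)) - log(sum_j exp(x_j - max(x))).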
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf2, 16, grid=grid(16), stream=stream0)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [support_1], Original ATen: [aten.mm]
extern_kernels.mm(buf2, primals_4, out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf3, out=buf4)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
del buf5
return (buf6, buf2, buf2, buf6, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.autograd
import torch.nn as nn
class GraphConv(nn.Module):
def __init__(self, in_features, out_features, bias=False):
super(GraphConv, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
def forward(self, input, adj):
support = torch.mm(input, self.W)
output = torch.mm(adj, support)
return output
class GCN(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN, self).__init__()
self.gc1 = GraphConv(nfeat, nhid)
self.gc2 = GraphConv(nhid, nclass)
self.dropout = dropout
def forward(self, x, adj):
x1 = F.relu(self.gc1(x, adj))
x2 = F.dropout(x1, self.dropout, training=self.training)
x3 = self.gc2(x2, adj)
return F.log_softmax(x3, dim=1), x2
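# Each GraphConv applies the propagation rule H' = A @ (H @ W), so the whole
# network computes log_softmax(A @ dropout(relu(A @ X @ W1)) @ W2).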
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.autograd
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf3 = buf0
del buf0
extern_kernels.mm(buf2, primals_4, out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf3, out=buf4)
buf5 = buf3
del buf3
triton_poi_fused__log_softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__log_softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf5
return buf6, buf2, buf2, buf6, reinterpret_tensor(primals_3, (4, 4), (1,
4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)
class GraphConv(nn.Module):
def __init__(self, in_features, out_features, bias=False):
super(GraphConv, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
def forward(self, input, adj):
support = torch.mm(input, self.W)
output = torch.mm(adj, support)
return output
class GCNNew(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCNNew, self).__init__()
self.gc1 = GraphConv(nfeat, nhid)
self.gc2 = GraphConv(nhid, nclass)
self.dropout = dropout
def forward(self, input_0, input_1):
        # order matches the trace in `call`: mm(primals_2, primals_1) is
        # support = x @ gc1.W, and primals_3 is adj in both output mms
        primals_1 = self.gc1.W
        primals_4 = self.gc2.W
        primals_2 = input_0
        primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
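# Minimal smoke test (assumes a CUDA device; not part of the original file):
#   model = GCNNew(nfeat=4, nhid=4, nclass=4, dropout=0.5).cuda()
#   log_probs, hidden = model(torch.rand(4, 4, device='cuda'),
#                             torch.rand(4, 4, device='cuda'))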
| SsGood/MMGL | GCN | false | 17,975 | [
"MIT"
] | 6 | ea769e46fffb42559e764e2912c5b1dc17c10af2 | https://github.com/SsGood/MMGL/tree/ea769e46fffb42559e764e2912c5b1dc17c10af2 |
PositionwiseFeedForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/5x/c5xv5gq6dn4fjpeonsfgmuehewjbhajmic4gqniclar6ryc44srk.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 512, grid=grid(512), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 8), (8, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class PositionwiseFeedForward(nn.Module):
def __init__(self, individual_featured):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(individual_featured, 2 * individual_featured)
self.w_2 = nn.Linear(2 * individual_featured, individual_featured)
self.dropout = nn.Dropout(0.2)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
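# This is the position-wise feed-forward block from the Transformer,
# FFN(x) = W2 @ dropout(relu(W1 @ x + b1)) + b2, applied independently at each
# position; here the hidden width is 2x the model width rather than the usual 4x.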
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'individual_featured': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1,
primals_2, buf3, 512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 8), (8, 1), 0), primals_4, buf3
class PositionwiseFeedForwardNew(nn.Module):
def __init__(self, individual_featured):
super(PositionwiseFeedForwardNew, self).__init__()
self.w_1 = nn.Linear(individual_featured, 2 * individual_featured)
self.w_2 = nn.Linear(2 * individual_featured, individual_featured)
self.dropout = nn.Dropout(0.2)
def forward(self, input_0):
primals_1 = self.w_1.weight
primals_2 = self.w_1.bias
primals_4 = self.w_2.weight
primals_5 = self.w_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Sunner4nwpu/RA-UWML-AU-Pytorch | PositionwiseFeedForward | false | 17,976 | [
"Apache-2.0"
] | 5 | 7d20b2f1ffa8a00595d1e75e0d1c15518a37a920 | https://github.com/Sunner4nwpu/RA-UWML-AU-Pytorch/tree/7d20b2f1ffa8a00595d1e75e0d1c15518a37a920 |
FeedForwardLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/jb/cjbxzli6v7rwzt24mjtja7cu7a6zbkabsw5two6m5axucpijmjfr.py
# Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# gelu => add, erf, mul, mul_1, mul_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
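# The fragment above is the exact (erf-based) GELU rather than the tanh
# approximation:
#     gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
# with 0.7071067811865476 == 1/sqrt(2). A minimal eager-mode sketch of the
# same computation, for reference only (not used by the compiled kernel):
#     def gelu_ref(x):
#         return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))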
triton_poi_fused_gelu_0 = async_compile.triton('triton_poi_fused_gelu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/f6/cf6g5vjl6clpvfa2j7jw5adg3xchgkyal7cg5smxzk57hjhn3cgo.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_2 => add_1
# x_3 => var_mean
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_1, [3]), kwargs = {correction: 0, keepdim: True})
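# With correction=0 this is the biased estimator over the last dimension
# (size 4 here): mean = sum(x_i) / 4 and var = sum((x_i - mean)^2) / 4.
# The unrolled kernel below computes both in one pass and writes the mean to
# out_ptr0 and the variance to out_ptr1.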
triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/lr/clr2pglgwuw4ru27thoprqql5mtpl65cfmrqskcakhptk7pxkxg5.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_2 => add_1
# x_3 => add_2, add_3, mul_3, mul_4, rsqrt, sub
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %getitem_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_6), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_7), kwargs = {})
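# The kernel below finishes LayerNorm elementwise:
#     y = (x + residual - mean) * rsqrt(var + 1e-06) * weight + bias
# where mean/var come from the statistics kernel above and 1e-06 matches the
# eps passed to nn.LayerNorm in the source module.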
triton_poi_fused_add_native_layer_norm_2 = async_compile.triton('triton_poi_fused_add_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
stream0 = get_raw_stream(0)
triton_poi_fused_gelu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_1.run(buf2, primals_1, buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_2.run(buf2, primals_1, buf3, buf4, primals_6, primals_7, buf5, 256, grid=grid(256), stream=stream0)
del buf3
del buf4
del primals_7
return (buf5, primals_1, primals_6, buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.autograd
import torch.nn as nn
class FeedForwardLayer(nn.Module):
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.gelu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
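# The forward pass above is the standard post-norm position-wise FFN:
#     y = LayerNorm(x + Dropout(W2(GELU(W1(x)))))
# i.e. a residual connection around a two-layer GELU MLP, normalized with
# eps=1e-06.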
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_in': 4, 'd_hid': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.autograd
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(64)](buf2, primals_1,
buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(256)](buf2, primals_1,
buf3, buf4, primals_6, primals_7, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
del buf4
del primals_7
return buf5, primals_1, primals_6, buf0, reinterpret_tensor(buf1, (64,
4), (4, 1), 0), buf2, primals_4
class FeedForwardLayerNew(nn.Module):
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, input_0):
primals_2 = self.w_1.weight
primals_3 = self.w_1.bias
primals_4 = self.w_2.weight
primals_5 = self.w_2.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| SsGood/MMGL | FeedForwardLayer | false | 17,977 | [
"MIT"
] | 6 | ea769e46fffb42559e764e2912c5b1dc17c10af2 | https://github.com/SsGood/MMGL/tree/ea769e46fffb42559e764e2912c5b1dc17c10af2 |
Upsampling | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/y2/cy2axpijxk6u2b7v6le3asl4e5eardypooir66wsqq5fhx67j6in.py
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv_transpose2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 1024, grid=grid(1024), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as M
class Upsampling(M.Module):
def __init__(self, in_channels, out_channels, kernel_size=2):
super().__init__()
self.upsample = M.ConvTranspose2d(in_channels, out_channels,
kernel_size=kernel_size, stride=2)
def forward(self, x):
return self.upsample(x)
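# With stride=2 and no padding, ConvTranspose2d doubles each spatial dim:
#     H_out = (H_in - 1) * stride + kernel_size = (4 - 1) * 2 + 2 = 8
# for the 4x4 sample input below, matching the (4, 4, 8, 8) buffer asserted
# in the compiled call().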
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as M
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(1024)](buf1, primals_2, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class UpsamplingNew(M.Module):
def __init__(self, in_channels, out_channels, kernel_size=2):
super().__init__()
self.upsample = M.ConvTranspose2d(in_channels, out_channels,
kernel_size=kernel_size, stride=2)
def forward(self, input_0):
primals_1 = self.upsample.weight
primals_2 = self.upsample.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| SuperbTUM/RAW-image-denoising | Upsampling | false | 17,978 | [
"MIT"
] | 4 | 9f81be8da6a576f641022707d98b8c37f5c599ab | https://github.com/SuperbTUM/RAW-image-denoising/tree/9f81be8da6a576f641022707d98b8c37f5c599ab |
Signal2SH | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/zw/czwhlthm2sc6sq34yp7bxkh3f2puig2ul4jw33jkaofihjaczfdm.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# y => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
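# The clone materializes the permuted view in contiguous memory, moving the
# gradient axis (size 4) innermost so that each voxel's samples sit in one
# row of the (64, 4, 4) blocks consumed by the bmm against Signal2SHMat.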
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 256
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 64
y1 = (yindex // 64)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (64*x2) + (256*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/uk/cukyc7sbm5bchfpha6owrrvxm5syxm463prf7fwi4kgc5or2sgak.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 60
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 15
y1 = (yindex // 15)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (15*x2) + (960*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (64*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 15), (1, 4))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 4, 4), (256, 256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 256, 4, grid=grid(256, 4), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((64, 4, 15), (60, 15, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (64, 4, 15), (0, 1, 4), 0), out=buf1)
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, 1, 15, 4, 4, 4), (960, 960, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, buf2, 60, 64, grid=grid(60, 64), stream=stream0)
del buf1
return (reinterpret_tensor(buf2, (4, 15, 4, 4, 4), (960, 64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 15), (1, 4), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
from scipy import special as sci
def cart2sph(x, y, z):
"""
    cart2sph(x, y, z) -> phi, theta, r
    Computes the corresponding spherical coordinates (azimuthal angle, polar angle, radial distance) of the given input parameters :attr:`x`, :attr:`y` and :attr:`z`.
Args:
x (Number): x position
y (Number): y position
z (Number): z position
Example::
>>> cart2sph(1, 1, 1)
(0.78539816339744828, 0.95531661812450919, 1.7320508075688772)
"""
azimuthal_angle = np.arctan2(y, x)
radial_distance = np.sqrt(x ** 2 + y ** 2 + z ** 2)
polar_angle = np.arccos(z / radial_distance)
return azimuthal_angle, polar_angle, radial_distance
class Signal2SH(nn.Module):
"""
Signal2SH(dwi) -> dwi_sh
Computes the corresponding spherical harmonic coefficients
Args:
x_in (5D tensor): input dwi tensor
x_in.size(): (Batchsize x Number of shells * Number of gradients x DimX x DimY x DimZ)
y (5D tensor): corresponding harmonic coefficients tensor
y.size(): (Batchsize x Number of shells*Number of coefficients x DimX x DimY x DimZ)
"""
def __init__(self, sh_order, gradients, lb_lambda=0.006):
super(Signal2SH, self).__init__()
self.sh_order = sh_order
self.lb_lambda = lb_lambda
self.num_gradients = gradients.shape[0]
self.num_coefficients = int((self.sh_order + 1) * (self.sh_order /
2 + 1))
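        # Count of even-order real SH coefficients up to sh_order:
        # (sh_order + 1) * (sh_order / 2 + 1); for sh_order=4 this is
        # 5 * 3 = 15, matching the (num_gradients, 15) fitting matrix built
        # below.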
b = np.zeros((self.num_gradients, self.num_coefficients))
l = np.zeros((self.num_coefficients, self.num_coefficients))
for id_gradient in range(self.num_gradients):
id_column = 0
for id_order in range(0, self.sh_order + 1, 2):
for id_degree in range(-id_order, id_order + 1):
gradients_phi, gradients_theta, _gradients_z = cart2sph(
gradients[id_gradient, 0], gradients[id_gradient, 1
], gradients[id_gradient, 2])
y = sci.sph_harm(np.abs(id_degree), id_order,
gradients_phi, gradients_theta)
if id_degree < 0:
b[id_gradient, id_column] = np.real(y) * np.sqrt(2)
elif id_degree == 0:
b[id_gradient, id_column] = np.real(y)
elif id_degree > 0:
b[id_gradient, id_column] = np.imag(y) * np.sqrt(2)
l[id_column, id_column
] = self.lb_lambda * id_order ** 2 * (id_order + 1
) ** 2
id_column += 1
b_inv = np.linalg.pinv(np.matmul(b.transpose(), b) + l)
self.Signal2SHMat = torch.nn.Parameter(torch.from_numpy(np.matmul(
b_inv, b.transpose()).transpose()).float(), requires_grad=False)
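        # Signal2SHMat is the (transposed) regularized least-squares fit
        #     C = (B^T B + L)^{-1} B^T S
        # where B samples the real SH basis at the gradient directions and L
        # is the Laplace-Beltrami penalty with diagonal entries
        # lb_lambda * l^2 * (l + 1)^2, so forward() reduces to one matmul.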
def forward(self, x_in):
x = x_in.reshape((-1, np.ceil(x_in.size(1) / self.num_gradients).
astype(int), self.num_gradients, x_in.size(2), x_in.size(3),
x_in.size(4)))
x = x.permute(0, 1, 3, 4, 5, 2)
y = x.matmul(self.Signal2SHMat)
y = y.permute(0, 1, 5, 2, 3, 4).contiguous().reshape((x.size(0), -1,
x_in.size(2), x_in.size(3), x_in.size(4)))
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'sh_order': 4, 'gradients': torch.rand([4, 4])}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
from scipy import special as sci
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 256
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 64
y1 = yindex // 64
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 64 * x2 + 256 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 60
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 15
y1 = yindex // 15
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 15 * x2 + 960 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 64 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 15), (1, 4))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 4, 4), (256, 256, 64, 16, 4,
1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256, 4)](arg0_1, buf0, 256, 4, XBLOCK
=4, YBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((64, 4, 15), (60, 15, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg1_1, (64, 4, 15), (0, 1, 4), 0), out=buf1
)
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, 1, 15, 4, 4, 4), (960, 960, 64, 16, 4,
1), torch.float32)
triton_poi_fused_clone_1[grid(60, 64)](buf1, buf2, 60, 64, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del buf1
return reinterpret_tensor(buf2, (4, 15, 4, 4, 4), (960, 64, 16, 4, 1), 0),
def cart2sph(x, y, z):
"""
    cart2sph(x, y, z) -> phi, theta, r
    Computes the corresponding spherical coordinates (azimuthal angle, polar angle, radial distance) of the given input parameters :attr:`x`, :attr:`y` and :attr:`z`.
Args:
x (Number): x position
y (Number): y position
z (Number): z position
Example::
>>> cart2sph(1, 1, 1)
(0.78539816339744828, 0.95531661812450919, 1.7320508075688772)
"""
azimuthal_angle = np.arctan2(y, x)
radial_distance = np.sqrt(x ** 2 + y ** 2 + z ** 2)
polar_angle = np.arccos(z / radial_distance)
return azimuthal_angle, polar_angle, radial_distance
class Signal2SHNew(nn.Module):
"""
Signal2SH(dwi) -> dwi_sh
Computes the corresponding spherical harmonic coefficients
Args:
x_in (5D tensor): input dwi tensor
x_in.size(): (Batchsize x Number of shells * Number of gradients x DimX x DimY x DimZ)
y (5D tensor): corresponding harmonic coefficients tensor
y.size(): (Batchsize x Number of shells*Number of coefficients x DimX x DimY x DimZ)
"""
def __init__(self, sh_order, gradients, lb_lambda=0.006):
super(Signal2SHNew, self).__init__()
self.sh_order = sh_order
self.lb_lambda = lb_lambda
self.num_gradients = gradients.shape[0]
self.num_coefficients = int((self.sh_order + 1) * (self.sh_order /
2 + 1))
b = np.zeros((self.num_gradients, self.num_coefficients))
l = np.zeros((self.num_coefficients, self.num_coefficients))
for id_gradient in range(self.num_gradients):
id_column = 0
for id_order in range(0, self.sh_order + 1, 2):
for id_degree in range(-id_order, id_order + 1):
gradients_phi, gradients_theta, _gradients_z = cart2sph(
gradients[id_gradient, 0], gradients[id_gradient, 1
], gradients[id_gradient, 2])
y = sci.sph_harm(np.abs(id_degree), id_order,
gradients_phi, gradients_theta)
if id_degree < 0:
b[id_gradient, id_column] = np.real(y) * np.sqrt(2)
elif id_degree == 0:
b[id_gradient, id_column] = np.real(y)
elif id_degree > 0:
b[id_gradient, id_column] = np.imag(y) * np.sqrt(2)
l[id_column, id_column
] = self.lb_lambda * id_order ** 2 * (id_order + 1
) ** 2
id_column += 1
b_inv = np.linalg.pinv(np.matmul(b.transpose(), b) + l)
self.Signal2SHMat = torch.nn.Parameter(torch.from_numpy(np.matmul(
b_inv, b.transpose()).transpose()).float(), requires_grad=False)
def forward(self, input_0):
arg1_1 = self.Signal2SHMat
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
| SimonKoppers/DELIMIT | Signal2SH | false | 17,979 | [
"MIT"
] | 7 | d778a567bbec1beef2395ead60aa1e30086bb07c | https://github.com/SimonKoppers/DELIMIT/tree/d778a567bbec1beef2395ead60aa1e30086bb07c |
TransformerEncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/25/c25xezvaufcxosn2zaablzq3d4dz4ocigm5s7w52r36lqwucoptj.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
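# Same biased var_mean pattern as elsewhere, but this kernel precomputes
# rsqrt(var + 1e-05) directly (1e-05 is the nn.LayerNorm default eps), so the
# elementwise normalization step only needs a subtract and two multiplies.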
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/uv/cuvyzdrclvsiubqwnkqyzqogybjelcuaoywggvz52bz5abo4hmua.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/5o/c5oauw3z3ep2ftanta4riae32t6z5u67plt43eekrznregbmlpjr.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul_2
# Graph fragment:
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
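# The 1.0 factor is the attention scale head_dim ** -0.5 folded into the
# query projection; a scale of exactly 1.0 implies head_dim == 1 here
# (embed_dim 4 split across, presumably, 4 heads).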
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/kj/ckjie4jfhwucdjwrkoxyorl5ltzj7tre6m3jengzvslyxc6fnmbt.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
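# Numerically stable softmax, split across two kernels: this one computes
# exp(x - max(x)) along the last dimension (the amax subtraction prevents
# overflow), and the next kernel divides by the row sum.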
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/bt/cbt34fhodpavx2oq24hpku3ag5gk3mibbr2ozkxz4z7bs2m23gfy.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
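# Reference sketch: together the two softmax kernels implement the standard
# numerically stable decomposition -- exp(x - rowmax) in the first pass,
# division by the row sum in the second. For the (4, 4, 4) float32 scores
# produced here, the pair is equivalent to this eager helper:
def _softmax_reference(x):
    # subtracting the row max keeps exp() finite; the shift cancels in the
    # ratio, so this matches torch.softmax(x, dim=-1)
    e = (x - x.max(dim=-1, keepdim=True).values).exp()
    return e / e.sum(dim=-1, keepdim=True)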
# kernel path: runs/run_shard_2/inductor_cache/fe/cfech2rxtnpxedtqlj5kjrdsdntbttrruuwxu7hmdxbzkefn6e6q.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
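# The clone kernel above is just a contiguous 4x4 transpose: with
# embed_dim == num_heads == 4 each head has size 1, so permuting the
# attention output back from (seq, batch*heads, 1) to the layout expected by
# the output projection collapses to swapping the two leading axes.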
# kernel path: runs/run_shard_2/inductor_cache/4s/c4sghfflzlvhg76ckzrkj2yv7ethqif4fe437f2dxqtwuh4ahinp.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_2 => add_2
# x_3 => var_mean_1
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/cm/ccma2nw3h6tzmwtufo45byghevpzamho2ay363inl5jh2l67anpn.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_2 => add_2
# x_3 => add_3, add_4, mul_3, mul_4, rsqrt_1, sub_2
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_9), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_8), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
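# Reference sketch: the two layer-norm kernels above fuse the residual add
# with LayerNorm. The first emits the per-row mean and biased variance
# (correction=0) of x + attn_out; the second applies the affine transform.
# In eager PyTorch the pair corresponds to:
def _add_layer_norm_reference(x, attn_out, weight, bias, eps=1e-05):
    h = x + attn_out
    mean = h.mean(dim=-1, keepdim=True)
    var = h.var(dim=-1, unbiased=False, keepdim=True)
    return (h - mean) * (var + eps).rsqrt() * weight + bias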
# kernel path: runs/run_shard_2/inductor_cache/xy/cxyww2txopwpx25tsbwly3yxypvgzdkmo5kybevltblykxeczke5.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_11), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_8 = async_compile.triton('triton_poi_fused_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/cb/ccbk5xotcsqtl2ksko2mwsbwmw3wuum5asogvfe7nzhucp3hqc2c.py
# Topologically Sorted Source Nodes: [x_2, x_8], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_2 => add_2
# x_8 => add_5
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_13), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %add_tensor), kwargs = {})
triton_poi_fused_add_9 = async_compile.triton('triton_poi_fused_add_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_out_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
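# The add kernel above folds the two remaining adds into one pass: the fc2
# bias and the second residual connection, i.e.
# out = (x + attn_out) + (h @ W2.T + b2); the matmul itself is delegated to
# the extern mm call in `call` below.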
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, grid=grid(16), stream=stream0)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf6, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf8, buf9, 64, grid=grid(64), stream=stream0)
del buf8
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf10, buf11, 4, 4, grid=grid(4, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_7
buf13 = buf1; del buf1 # reuse
buf14 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf12, buf13, buf14, 4, grid=grid(4), stream=stream0)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_7.run(primals_1, buf12, buf13, buf14, primals_8, primals_9, buf15, 16, grid=grid(16), stream=stream0)
del buf13
del buf14
del primals_9
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf16)
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_8.run(buf17, primals_11, 16, grid=grid(16), stream=stream0)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf17, reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf18)
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [x_2, x_8], Original ATen: [aten.add]
triton_poi_fused_add_9.run(buf19, primals_1, buf12, primals_13, 16, grid=grid(16), stream=stream0)
del primals_13
return (buf19, primals_1, primals_8, buf2, buf9, reinterpret_tensor(buf11, (4, 4), (4, 1), 0), buf12, buf15, buf17, primals_12, primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class TransformerEncoderLayer(nn.Module):
def __init__(self, embed_dim, num_heads, hidden_size, dropout=0.0,
attention_dropout=0.0, activation_dropout=0.0):
super().__init__()
self.embed_dim = embed_dim
self.self_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.self_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.dropout = dropout
self.activation_dropout = activation_dropout
self.normalize_before = True
self.fc1 = torch.nn.Linear(self.embed_dim, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, self.embed_dim)
self.layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.init_parameters()
def forward(self, x, key_padding_mask=None, attn_mask=None):
residual = x
x = self.self_attn_layer_norm(x)
x, _att = self.self_attn(query=x, key=x, value=x, key_padding_mask=
key_padding_mask, attn_mask=attn_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
residual = x
x = self.layer_norm(x)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
return x
def init_parameters(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.constant_(self.fc2.bias, 0.0)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4, 'hidden_size': 4}]
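# Usage sketch (the eager module above; the compiled variant additionally
# assumes a CUDA device):
#   layer = TransformerEncoderLayer(embed_dim=4, num_heads=4, hidden_size=4)
#   out = layer(torch.rand(4, 4))  # pre-norm: LN -> self-attn -> residual -> LN -> FFN -> residual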
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
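# Note: the "mul" kernel above adds the query bias and applies the attention
# scale q * head_dim ** -0.5; with embed_dim == num_heads == 4 the head size
# is 1, so the scale constant folds to 1.0.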
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=
1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0)
del buf3
triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1,
4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf8
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4,
1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_7
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_1, buf12,
buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12,
buf13, buf14, primals_8, primals_9, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf13
del buf14
del primals_9
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (4, 4), (1,
4), 0), out=buf16)
buf17 = buf16
del buf16
triton_poi_fused_relu_8[grid(16)](buf17, primals_11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf17, reinterpret_tensor(primals_12, (4, 4), (1,
4), 0), out=buf18)
buf19 = buf18
del buf18
triton_poi_fused_add_9[grid(16)](buf19, primals_1, buf12,
primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_13
return (buf19, primals_1, primals_8, buf2, buf9, reinterpret_tensor(
buf11, (4, 4), (4, 1), 0), buf12, buf15, buf17, primals_12,
primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4
), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))
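# call() returns the layer output first; the trailing tensors are the
# intermediates Inductor keeps for the backward pass (normalized inputs,
# attention probabilities, and reinterpreted views of the projection weights).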
class TransformerEncoderLayerNew(nn.Module):
def __init__(self, embed_dim, num_heads, hidden_size, dropout=0.0,
attention_dropout=0.0, activation_dropout=0.0):
super().__init__()
self.embed_dim = embed_dim
self.self_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.self_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.dropout = dropout
self.activation_dropout = activation_dropout
self.normalize_before = True
self.fc1 = torch.nn.Linear(self.embed_dim, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, self.embed_dim)
self.layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.init_parameters()
def init_parameters(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.constant_(self.fc2.bias, 0.0)
    def forward(self, input_0):
        primals_1 = input_0
        primals_2 = self.self_attn_layer_norm.weight
        primals_3 = self.self_attn_layer_norm.bias
        primals_4 = self.self_attn.in_proj_weight
        primals_5 = self.self_attn.in_proj_bias
        primals_6 = self.self_attn.out_proj.weight
        primals_7 = self.self_attn.out_proj.bias
        primals_8 = self.layer_norm.weight
        primals_9 = self.layer_norm.bias
        primals_10 = self.fc1.weight
        primals_11 = self.fc1.bias
        primals_12 = self.fc2.weight
        primals_13 = self.fc2.bias
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0]
| Slowika/GameBias-EmeCom2020 | TransformerEncoderLayer | false | 17,980 | [
"MIT"
] | 5 | 5b94c47559f8202bca99c26fc1bcb078dd0509a6 | https://github.com/Slowika/GameBias-EmeCom2020/tree/5b94c47559f8202bca99c26fc1bcb078dd0509a6 |
PartialConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/dv/cdvmujdx4wzp3xf3q2rdnt72xso7clbkadprbhlirztwjsalq3rx.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/w7/cw7qmyjnvjbamriiio3swclguy3uitook4fvr3c24vavr2egg5ob.py
# Topologically Sorted Source Nodes: [output, no_update_holes, mask_sum, sub, truediv, output_pre, output_1, new_mask, new_mask_1], Original ATen: [aten.convolution, aten.eq, aten.masked_fill, aten.sub, aten.div, aten.add, aten.ones_like]
# Source node to ATen node mapping:
# mask_sum => full_default, where
# new_mask => full_default_2
# new_mask_1 => where_2
# no_update_holes => eq
# output => convolution
# output_1 => full_default_1, where_1
# output_pre => add
# sub => sub
# truediv => div
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %eq : [num_users=3] = call_function[target=torch.ops.aten.eq.Scalar](args = (%convolution_1, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %convolution_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %expand), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %where), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %expand), kwargs = {})
# %full_default_1 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %add), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 1, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %full_default_2), kwargs = {})
triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1 = async_compile.triton('triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_out_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp5 - tmp4
tmp7 = 1.0
tmp8 = tl.where(tmp2, tmp7, tmp0)
tmp9 = tmp6 / tmp8
tmp10 = tmp9 + tmp4
tmp11 = tl.where(tmp2, tmp1, tmp10)
tmp12 = tl.where(tmp2, tmp1, tmp7)
tl.store(in_out_ptr0 + (x2), tmp11, xmask)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
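# Reference sketch: the fused kernel above reproduces the partial-convolution
# renormalization from the eager module. Given output = input_conv(input * mask)
# and output_mask = mask_conv(mask), it computes:
def _partial_conv_reference(output, output_mask, bias):
    b = bias.view(1, -1, 1, 1)
    holes = output_mask == 0  # positions the mask never covered
    mask_sum = output_mask.masked_fill(holes, 1.0)
    out = ((output - b) / mask_sum + b).masked_fill(holes, 0.0)
    return out, torch.ones_like(out).masked_fill(holes, 0.0)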
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
# Topologically Sorted Source Nodes: [output_mask], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_2, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
del primals_2
del primals_5
buf3 = buf1; del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [output, no_update_holes, mask_sum, sub, truediv, output_pre, output_1, new_mask, new_mask_1], Original ATen: [aten.convolution, aten.eq, aten.masked_fill, aten.sub, aten.div, aten.add, aten.ones_like]
triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1.run(buf3, buf2, primals_4, buf4, 16, grid=grid(16), stream=stream0)
del primals_4
return (buf3, buf4, primals_3, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from itertools import product as product
import torch.nn as nn
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
if init_type == 'gaussian':
nn.init.normal_(m.weight, 0.0, 0.02)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, 'Unsupported initialization: {}'.format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
class PartialConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, False)
self.input_conv.apply(weights_init('kaiming'))
torch.nn.init.constant_(self.mask_conv.weight, 1.0)
for param in self.mask_conv.parameters():
param.requires_grad = False
def forward(self, input, mask):
output = self.input_conv(input * mask)
if self.input_conv.bias is not None:
output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as(
output)
else:
output_bias = torch.zeros_like(output)
with torch.no_grad():
output_mask = self.mask_conv(mask)
no_update_holes = output_mask == 0
mask_sum = output_mask.masked_fill_(no_update_holes, 1.0)
output_pre = (output - output_bias) / mask_sum + output_bias
output = output_pre.masked_fill_(no_update_holes, 0.0)
new_mask = torch.ones_like(output)
new_mask = new_mask.masked_fill_(no_update_holes, 0.0)
return output, new_mask
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
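# Worked example: with an all-ones mask, 4 input channels, and a 4x4 kernel,
# mask_conv sums 4 * 4 * 4 = 64 ones per output element, so the
# renormalization divides by 64 and no holes are produced.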
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from itertools import product as product
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1(
in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp5 - tmp4
tmp7 = 1.0
tmp8 = tl.where(tmp2, tmp7, tmp0)
tmp9 = tmp6 / tmp8
tmp10 = tmp9 + tmp4
tmp11 = tl.where(tmp2, tmp1, tmp10)
tmp12 = tl.where(tmp2, tmp1, tmp7)
tl.store(in_out_ptr0 + x2, tmp11, xmask)
tl.store(out_ptr0 + x2, tmp12, xmask)
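# Note: tmp5 adds the conv bias and tmp6 immediately subtracts it again; the
# pair mirrors the traced graph, where the convolution output carries the
# bias and (output - bias) is renormalized before the bias is re-added.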
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = extern_kernels.convolution(primals_2, primals_5, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
del primals_2
del primals_5
buf3 = buf1
del buf1
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1[
            grid(16)](buf3, buf2, primals_4, buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
del primals_4
return buf3, buf4, primals_3, buf0, buf2
def weights_init(init_type='gaussian'):
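    # Closure meant for module.apply(): initializes Conv/Linear weights with
    # the requested scheme and zeroes any biases.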
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
if init_type == 'gaussian':
nn.init.normal_(m.weight, 0.0, 0.02)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, 'Unsupported initialization: {}'.format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
class PartialConvNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, False)
self.input_conv.apply(weights_init('kaiming'))
torch.nn.init.constant_(self.mask_conv.weight, 1.0)
for param in self.mask_conv.parameters():
param.requires_grad = False
def forward(self, input_0, input_1):
primals_1 = self.input_conv.weight
primals_4 = self.input_conv.bias
primals_2 = self.mask_conv.weight
primals_3 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| TaroNakasendo/MaskEraser | PartialConv | false | 17,981 | ["MIT"] | 3 | 373af686194aff716f53785e40252beae7b26cff | https://github.com/TaroNakasendo/MaskEraser/tree/373af686194aff716f53785e40252beae7b26cff |
NaiveGroupNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ik/ciksr4mu7uc5th32dws4fyvyrjpyueaaiafp6rmlyzm5zj4wy74n.py
# Topologically Sorted Source Nodes: [mean, pow_1, mean_1, pow_2, var, add, std, mul, input_4], Original ATen: [aten.mean, aten.pow, aten.sub, aten.add, aten.sqrt, aten.mul]
# Source node to ATen node mapping:
# add => add
# input_4 => add_1
# mean => mean
# mean_1 => mean_1
# mul => mul
# pow_1 => pow_1
# pow_2 => pow_2
# std => sqrt
# var => sub
# Graph fragment:
# %mean : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1], True), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mean, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean_1, %pow_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_3), kwargs = {})
triton_per_fused_add_mean_mul_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_pow_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = tmp0 * tmp0
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp4 / tmp10
tmp12 = tmp9 / tmp10
tmp13 = tmp11 * tmp11
tmp14 = tmp12 - tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp0 - tmp11
tmp19 = tmp18 / tmp17
tmp21 = tmp19 * tmp20
tmp23 = tmp21 + tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp11, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp17, xmask)
tl.store(out_ptr0 + (r1 + (64*x0)), tmp23, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0); del buf0 # reuse
buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0); del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, pow_1, mean_1, pow_2, var, add, std, mul, input_4], Original ATen: [aten.mean, aten.pow, aten.sub, aten.add, aten.sqrt, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_pow_sqrt_sub_0.run(buf1, buf3, primals_1, primals_2, primals_3, buf4, 4, 64, grid=grid(4), stream=stream0)
del primals_2
del primals_3
return (buf4, primals_1, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch.nn import Parameter
from torch.nn import init
import torch.nn.parallel
class NaiveGroupNorm(Module):
"""NaiveGroupNorm implements Group Normalization with the high-level matrix operations in PyTorch.
It is a temporary solution to export GN by ONNX before the official GN can be exported by ONNX.
The usage of NaiveGroupNorm is exactly the same as the official :class:`torch.nn.GroupNorm`.
Args:
num_groups (int): number of groups to separate the channels into
num_channels (int): number of channels expected in input
eps: a value added to the denominator for numerical stability. Default: 1e-5
affine: a boolean value that when set to ``True``, this module
has learnable per-channel affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, C, *)` where :math:`C=\\text{num\\_channels}`
- Output: :math:`(N, C, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 6, 10, 10)
>>> # Separate 6 channels into 3 groups
>>> m = NaiveGroupNorm(3, 6)
>>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
>>> m = NaiveGroupNorm(6, 6)
>>> # Put all 6 channels into a single group (equivalent with LayerNorm)
>>> m = NaiveGroupNorm(1, 6)
>>> # Activating the module
>>> output = m(input)
.. _`Group Normalization`: https://arxiv.org/abs/1803.08494
"""
__constants__ = ['num_groups', 'num_channels', 'eps', 'affine',
'weight', 'bias']
def __init__(self, num_groups, num_channels, eps=1e-05, affine=True):
super(NaiveGroupNorm, self).__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(torch.Tensor(num_channels))
self.bias = Parameter(torch.Tensor(num_channels))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input):
N, C, H, W = input.size()
        assert C % self.num_groups == 0, 'num_channels must be divisible by num_groups'
input = input.reshape(N, self.num_groups, -1)
mean = input.mean(dim=-1, keepdim=True)
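        # One-pass variance via E[x^2] - (E[x])^2; cheaper than centering
        # first, at some cost in numerical stability.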
var = (input ** 2).mean(dim=-1, keepdim=True) - mean ** 2
std = torch.sqrt(var + self.eps)
input = (input - mean) / std
input = input.reshape(N, C, H, W)
if self.affine:
            input = (input * self.weight.reshape(1, C, 1, 1) +
                self.bias.reshape(1, C, 1, 1))
return input
def extra_repr(self):
        return ('{num_groups}, {num_channels}, eps={eps}, affine={affine}'
            .format(**self.__dict__))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_groups': 1, 'num_channels': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
from torch.nn import Parameter
from torch.nn import init
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
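# Persistent reduction: one program per group computes the mean and E[x^2]
# over its 64 elements in a single pass, derives std = sqrt(E[x^2] - mean^2
# + eps), then normalizes and applies the per-channel affine transform.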
@triton.jit
def triton_per_fused_add_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = tmp0 * tmp0
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp4 / tmp10
tmp12 = tmp9 / tmp10
tmp13 = tmp11 * tmp11
tmp14 = tmp12 - tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp0 - tmp11
tmp19 = tmp18 / tmp17
tmp21 = tmp19 * tmp20
tmp23 = tmp21 + tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp11, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp17, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp23, xmask)
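# Inductor entry point: allocates the mean/std buffers and the output, then
# launches the fused kernel with one program per (sample, group) pair.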
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
del buf0
buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mean_mul_pow_sqrt_sub_0[grid(4)](buf1, buf3,
primals_1, primals_2, primals_3, buf4, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_2
del primals_3
return buf4, primals_1, buf1, buf3
class NaiveGroupNormNew(Module):
"""NaiveGroupNorm implements Group Normalization with the high-level matrix operations in PyTorch.
It is a temporary solution to export GN by ONNX before the official GN can be exported by ONNX.
The usage of NaiveGroupNorm is exactly the same as the official :class:`torch.nn.GroupNorm`.
Args:
num_groups (int): number of groups to separate the channels into
num_channels (int): number of channels expected in input
eps: a value added to the denominator for numerical stability. Default: 1e-5
affine: a boolean value that when set to ``True``, this module
has learnable per-channel affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, C, *)` where :math:`C=\\text{num\\_channels}`
- Output: :math:`(N, C, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 6, 10, 10)
>>> # Separate 6 channels into 3 groups
>>> m = NaiveGroupNorm(3, 6)
>>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
>>> m = NaiveGroupNorm(6, 6)
>>> # Put all 6 channels into a single group (equivalent with LayerNorm)
>>> m = NaiveGroupNorm(1, 6)
>>> # Activating the module
>>> output = m(input)
.. _`Group Normalization`: https://arxiv.org/abs/1803.08494
"""
__constants__ = ['num_groups', 'num_channels', 'eps', 'affine',
'weight', 'bias']
def __init__(self, num_groups, num_channels, eps=1e-05, affine=True):
super(NaiveGroupNormNew, self).__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(torch.Tensor(num_channels))
self.bias = Parameter(torch.Tensor(num_channels))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def extra_repr(self):
        return ('{num_groups}, {num_channels}, eps={eps}, affine={affine}'
            .format(**self.__dict__))
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Tanveer81/BoxVOS | NaiveGroupNorm | false | 17,982 | ["BSD-2-Clause"] | 4 | c30aa319f18f3fbee2a25e0ed25cb006a4598300 | https://github.com/Tanveer81/BoxVOS/tree/c30aa319f18f3fbee2a25e0ed25cb006a4598300 |
eSEModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/yg/cygooswl5gkxugqq2ejgag2vtcqhtumn2j3notsgzty3xoxbrq4v.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/y4/cy4ly3ilmj5tapwerzwiu6it4hgyex6ime3bsbv3nbhomefegg4p.py
# Topologically Sorted Source Nodes: [x_1, add, relu6, x_2, mul], Original ATen: [aten.convolution, aten.add, aten.hardtanh, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# mul => mul
# relu6 => clamp_max, clamp_min
# x_1 => convolution
# x_2 => div
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, 3.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %div), kwargs = {})
triton_poi_fused_add_convolution_div_hardtanh_mul_1 = async_compile.triton('triton_poi_fused_add_convolution_div_hardtanh_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_hardtanh_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 16)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/qm/cqmw7ubmicijlko6wu74fpj5hgifmp2gxk2sxq2f7dzm46dajm65.py
# Topologically Sorted Source Nodes: [x_1, add], Original ATen: [aten.convolution, aten.add, aten.hardtanh_backward]
# Source node to ATen node mapping:
# add => add
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, 3.0), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%add, 0), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add, 6), kwargs = {})
# %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le, %ge), kwargs = {})
triton_poi_fused_add_convolution_hardtanh_backward_2 = async_compile.triton('triton_poi_fused_add_convolution_hardtanh_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_hardtanh_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, add, relu6, x_2, mul], Original ATen: [aten.convolution, aten.add, aten.hardtanh, aten.div, aten.mul]
triton_poi_fused_add_convolution_div_hardtanh_mul_1.run(primals_1, buf2, primals_3, buf3, 256, grid=grid(256), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1, add], Original ATen: [aten.convolution, aten.add, aten.hardtanh_backward]
triton_poi_fused_add_convolution_hardtanh_backward_2.run(buf2, primals_3, buf4, 16, grid=grid(16), stream=stream0)
del buf2
del primals_3
return (buf3, primals_1, primals_2, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
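        # Piecewise-linear sigmoid approximation: clamp((x + 3) / 6, 0, 1).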
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
class eSEModule(nn.Module):
def __init__(self, channel, reduction=4):
super(eSEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0)
self.hsigmoid = Hsigmoid()
def forward(self, x):
input = x
x = self.avg_pool(x)
x = self.fc(x)
x = self.hsigmoid(x)
return input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
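# Global average pool: one program per (batch, channel) pair reduces its
# 4x4 spatial plane (16 elements) to the mean.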
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
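# Fused channel gate: hsigmoid(conv_out + bias) = relu6(x + 3) / 6, broadcast
# across the spatial dimensions and multiplied into the original input.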
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
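# Saturation mask saved for the backward pass: True where the hardtanh
# pre-activation (conv_out + bias + 3) is <= 0 or >= 6.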
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_2(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
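# Inductor entry point: average-pools, runs the 1x1 conv via the extern ATen
# kernel, applies the fused gate, and emits the saturation mask saved for
# the backward pass.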
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_div_hardtanh_mul_1[grid(256)](
primals_1, buf2, primals_3, buf3, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_add_convolution_hardtanh_backward_2[grid(16)](buf2,
primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf2
del primals_3
return buf3, primals_1, primals_2, buf1, buf4
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
class eSEModuleNew(nn.Module):
def __init__(self, channel, reduction=4):
super(eSEModuleNew, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0)
self.hsigmoid = Hsigmoid()
def forward(self, input_0):
primals_2 = self.fc.weight
primals_3 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Tanveer81/BoxVOS | eSEModule | false | 17,983 | ["BSD-2-Clause"] | 4 | c30aa319f18f3fbee2a25e0ed25cb006a4598300 | https://github.com/Tanveer81/BoxVOS/tree/c30aa319f18f3fbee2a25e0ed25cb006a4598300 |
GCN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ej/cejbaw3u6a6ms3irj4kumrcxpifvb2l5rykaqk7fbzej6akuba3y.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [0, 0, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 6
x2 = (xindex // 24)
x3 = xindex % 24
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-4) + x3 + (16*x2)), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/7r/c7rf7t47ugrrljnac7yn3tts4ejnsegxpwszwvc6aw6xc4esqj7a.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => constant_pad_nd_1
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%convolution, [1, 1, 0, 0], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_convolution_1 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x4 = (xindex // 6)
x2 = (xindex // 24) % 4
x5 = xindex
tmp0 = (-1) + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-1) + x0 + (4*x4)), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x2), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp5, tmp8, tmp9)
tl.store(out_ptr0 + (x5), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/rz/crzz77zyrjeawdii7kpwczc66hufsoxl7t6zpm5lr2jvxmthziwx.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_4 => constant_pad_nd_2
# Graph fragment:
# %constant_pad_nd_2 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 0, 0], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_2 = async_compile.triton('triton_poi_fused_constant_pad_nd_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6)
x2 = xindex
tmp0 = (-1) + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1)), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/7f/c7fv2odkmldcjl5nuu5vkxh3lyr6vlkp4rn6frplmkxdomm2qqss.py
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_5 => convolution_2
# x_6 => constant_pad_nd_3
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %constant_pad_nd_3 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%convolution_2, [0, 0, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_convolution_3 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 6
x4 = (xindex // 24)
x5 = xindex % 24
x2 = (xindex // 24) % 4
x6 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-4) + x5 + (16*x4)), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x2), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp5, tmp8, tmp9)
tl.store(out_ptr0 + (x6), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/qs/cqss3i54iwr5rty66bqdpmwvq6qebsg2m22uzbv5qrpwikwr25fk.py
# Topologically Sorted Source Nodes: [x_3, x_7, out], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out => add
# x_3 => convolution_1
# x_7 => convolution_3
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_1, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_3, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %convolution_3), kwargs = {})
triton_poi_fused_add_convolution_4 = async_compile.triton('triton_poi_fused_add_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 1), (12, 3, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1, 3), (12, 3, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 3, 1), (12, 3, 1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 384, grid=grid(384), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_convolution_1.run(buf1, primals_3, buf2, 384, grid=grid(384), stream=stream0)
del buf1
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_2.run(primals_1, buf4, 384, grid=grid(384), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_convolution_3.run(buf5, primals_7, buf6, 384, grid=grid(384), stream=stream0)
del buf5
del primals_7
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_3, x_7, out], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_4.run(buf8, primals_5, buf7, primals_9, 256, grid=grid(256), stream=stream0)
del buf7
del primals_5
del primals_9
return (buf8, primals_2, primals_4, primals_6, primals_8, buf0, buf2, buf4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 1), (12, 3, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 3), (12, 3, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1, 3), (12, 3, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 3, 1), (12, 3, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
class Conv2D(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding=
'same', stride=1, dilation=1, groups=1):
super(Conv2D, self).__init__()
        assert type(kernel_size) in [int, tuple], \
            'Allowed kernel type [int or tuple], not {}'.format(type(kernel_size))
assert padding == 'same', 'Allowed padding type {}, not {}'.format(
'same', padding)
self.kernel_size = kernel_size
if isinstance(kernel_size, tuple):
self.h_kernel = kernel_size[0]
self.w_kernel = kernel_size[1]
else:
self.h_kernel = kernel_size
self.w_kernel = kernel_size
self.padding = padding
self.stride = stride
self.dilation = dilation
self.groups = groups
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=self.stride,
dilation=self.dilation, groups=self.groups)
def forward(self, x):
if self.padding == 'same':
height, width = x.shape[2:]
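            # TF-style 'same' padding: total pad = (dim - 1) * stride +
            # kernel - dim, split as evenly as possible across both sides.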
h_pad_need = max(0, (height - 1) * self.stride + self.h_kernel -
height)
w_pad_need = max(0, (width - 1) * self.stride + self.w_kernel -
width)
pad_left = w_pad_need // 2
pad_right = w_pad_need - pad_left
pad_top = h_pad_need // 2
pad_bottom = h_pad_need - pad_top
padding = pad_left, pad_right, pad_top, pad_bottom
x = F.pad(x, padding, 'constant', 0)
x = self.conv(x)
return x
class GCN(nn.Module):
"""
Large Kernel Matters -- https://arxiv.org/abs/1703.02719
"""
def __init__(self, in_channels, out_channels, k=3):
super(GCN, self).__init__()
self.conv_l1 = Conv2D(in_channels=in_channels, out_channels=
out_channels, kernel_size=(k, 1), padding='same')
self.conv_l2 = Conv2D(in_channels=out_channels, out_channels=
out_channels, kernel_size=(1, k), padding='same')
self.conv_r1 = Conv2D(in_channels=in_channels, out_channels=
out_channels, kernel_size=(1, k), padding='same')
self.conv_r2 = Conv2D(in_channels=out_channels, out_channels=
out_channels, kernel_size=(k, 1), padding='same')
def forward(self, x):
x1 = self.conv_l1(x)
x1 = self.conv_l2(x1)
x2 = self.conv_r1(x)
x2 = self.conv_r2(x2)
out = x1 + x2
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
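# Minimal usage sketch (illustrative addition, not part of the original repo
# code): builds the module from the init inputs above and checks that 'same'
# padding preserves the spatial size.
if __name__ == '__main__':
    _model = GCN(**get_init_inputs()[1])
    _out = _model(*get_inputs())
    assert _out.shape == (4, 4, 4, 4)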
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
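# Kernel 0: zero-pads the (4, 4, 4, 4) input along H (4 -> 6, one row above
# and below) so the following (3, 1) convolution yields 'same'-sized output.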
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel,
                                       XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 6
x2 = xindex // 24
x3 = xindex % 24
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + x4, tmp6, xmask)
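# Kernel 1: fuses the (3, 1) conv's bias add with zero-padding along W
# (4 -> 6) ahead of the left branch's (1, 3) convolution.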
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x4 = xindex // 6
x2 = xindex // 24 % 4
x5 = xindex
tmp0 = -1 + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x4), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x2, tmp5 & xmask, eviction_policy='evict_last',
other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp5, tmp8, tmp9)
tl.store(out_ptr0 + x5, tmp10, xmask)
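# Kernel 2: zero-pads the original input along W (4 -> 6) for the right
# branch, whose (1, 3) convolution runs first.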
@triton.jit
def triton_poi_fused_constant_pad_nd_2(in_ptr0, out_ptr0, xnumel,
                                       XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = -1 + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + x2, tmp6, xmask)
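# Kernel 3: fuses the right branch's (1, 3)-conv bias add with zero-padding
# along H (4 -> 6) ahead of its (3, 1) convolution.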
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 6
x4 = xindex // 24
x5 = xindex % 24
x2 = xindex // 24 % 4
x6 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-4 + x5 + 16 * x4), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x2, tmp5 & xmask, eviction_policy='evict_last',
other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp5, tmp8, tmp9)
tl.store(out_ptr0 + x6, tmp10, xmask)
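# Kernel 4: in-place epilogue. Adds each branch's final conv bias and sums
# the two branch outputs, i.e. the `out = x1 + x2` in GCN.forward.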
@triton.jit
def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
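# call() runs the compiled graph: for each branch, a Triton pad kernel, a
# convolution via extern_kernels (bias deferred), and a fused pad+bias
# kernel; the final add happens in kernel 4, reusing buf3 in place as buf8.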
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 1), (12, 3, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 3), (12, 3, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 3, 1), (12, 3, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(384)](primals_1, buf0, 384,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32)
triton_poi_fused_constant_pad_nd_convolution_1[grid(384)](buf1,
primals_3, buf2, 384, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32)
triton_poi_fused_constant_pad_nd_2[grid(384)](primals_1, buf4, 384,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32)
triton_poi_fused_constant_pad_nd_convolution_3[grid(384)](buf5,
primals_7, buf6, 384, XBLOCK=128, num_warps=4, num_stages=1)
del buf5
del primals_7
buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = buf3
del buf3
triton_poi_fused_add_convolution_4[grid(256)](buf8, primals_5, buf7,
primals_9, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf7
del primals_5
del primals_9
return (buf8, primals_2, primals_4, primals_6, primals_8, buf0, buf2,
buf4, buf6)
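# Besides the output buf8, call() returns the weights and the padded
# activations, which are presumably saved for the backward pass.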
class Conv2D(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size,
                 padding='same', stride=1, dilation=1, groups=1):
        super(Conv2D, self).__init__()
        assert type(kernel_size) in [int, tuple], \
            'Allowed kernel type [int or tuple], not {}'.format(type(kernel_size))
        assert padding == 'same', \
            'Allowed padding type {}, not {}'.format('same', padding)
self.kernel_size = kernel_size
if isinstance(kernel_size, tuple):
self.h_kernel = kernel_size[0]
self.w_kernel = kernel_size[1]
else:
self.h_kernel = kernel_size
self.w_kernel = kernel_size
self.padding = padding
self.stride = stride
self.dilation = dilation
self.groups = groups
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                              kernel_size=kernel_size, stride=self.stride,
                              dilation=self.dilation, groups=self.groups)
def forward(self, x):
if self.padding == 'same':
height, width = x.shape[2:]
            h_pad_need = max(0, (height - 1) * self.stride + self.h_kernel - height)
            w_pad_need = max(0, (width - 1) * self.stride + self.w_kernel - width)
pad_left = w_pad_need // 2
pad_right = w_pad_need - pad_left
pad_top = h_pad_need // 2
pad_bottom = h_pad_need - pad_top
padding = pad_left, pad_right, pad_top, pad_bottom
x = F.pad(x, padding, 'constant', 0)
x = self.conv(x)
return x
class GCNNew(nn.Module):
"""
Large Kernel Matters -- https://arxiv.org/abs/1703.02719
"""
def __init__(self, in_channels, out_channels, k=3):
super(GCNNew, self).__init__()
        self.conv_l1 = Conv2D(in_channels=in_channels, out_channels=out_channels,
                              kernel_size=(k, 1), padding='same')
        self.conv_l2 = Conv2D(in_channels=out_channels, out_channels=out_channels,
                              kernel_size=(1, k), padding='same')
        self.conv_r1 = Conv2D(in_channels=in_channels, out_channels=out_channels,
                              kernel_size=(1, k), padding='same')
        self.conv_r2 = Conv2D(in_channels=out_channels, out_channels=out_channels,
                              kernel_size=(k, 1), padding='same')
def forward(self, input_0):
primals_2 = self.conv_l1.conv.weight
primals_3 = self.conv_l1.conv.bias
primals_4 = self.conv_l2.conv.weight
primals_5 = self.conv_l2.conv.bias
primals_6 = self.conv_r1.conv.weight
primals_7 = self.conv_r1.conv.bias
primals_8 = self.conv_r2.conv.weight
primals_9 = self.conv_r2.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
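# Minimal usage sketch (assumption: a CUDA device is available, since the
# compiled graph pins cuda:0 and the Triton kernels only run on GPU):
#   model = GCNNew(in_channels=4, out_channels=4).cuda()
#   out = model(torch.rand([4, 4, 4, 4], device='cuda'))  # (4, 4, 4, 4)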
| Tanveer81/BoxVOS | GCN | false | 17,984 | ["BSD-2-Clause"] | 4 | c30aa319f18f3fbee2a25e0ed25cb006a4598300 | https://github.com/Tanveer81/BoxVOS/tree/c30aa319f18f3fbee2a25e0ed25cb006a4598300 |