entry_point (string, 1-65 chars) | original_triton_code (string, 4.5k-619k chars) | python_code (string, 208-60.9k chars) | triton_code (string, 1.15k-275k chars) | repo_name (string, 7-115 chars) | module_name (string, 1-65 chars) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (sequence, 1-6 entries) | stars (int64, 0-19.8k) | sha (string, 40 chars) | repo_link (string, 72-180 chars)
---|---|---|---|---|---|---|---|---|---|---|---
GatedResUnit | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/va/cvasqctjzjefeflnuotcxa2mrhanju6357ukdrxur3i4qmmnupq4.py
# Topologically Sorted Source Nodes: [h, conv2d_1, g, h1], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# g => sigmoid
# h => convolution
# h1 => mul
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %mul : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {})
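# Summary (derived from the graph fragment above): this fused pointwise kernel
# finishes both convolutions by adding their per-channel biases, then applies
# the sigmoid gate and multiplies:
#   h1 = (conv_h(x) + b_h) * sigmoid(conv_g(x) + b_g)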
triton_poi_fused_convolution_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + (x3), xmask)
tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(in_out_ptr1 + (x3), tmp5, xmask)
tl.store(out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fw/cfwyzalciavhzqbgdliew5gcmomnbat4salq3w4w3tsiib6y2pws.py
# Topologically Sorted Source Nodes: [h_1, conv2d_3, g_1, h2, add], Original ATen: [aten.convolution, aten.sigmoid, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# conv2d_3 => convolution_3
# g_1 => sigmoid_1
# h2 => mul_1
# h_1 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_3,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, %sigmoid_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
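# Summary (derived from the graph fragment above): same bias-add, sigmoid-gate
# and multiply pattern as kernel 0, followed by the residual connection:
#   out = (conv_h(h1) + b_h) * sigmoid(conv_g(h1) + b_g) + x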
triton_poi_fused_add_convolution_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_add_convolution_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_sigmoid_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + (x3), xmask)
tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tmp9 = tmp7 + tmp8
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(in_out_ptr1 + (x3), tmp5, xmask)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h, conv2d_1, g, h1], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0.run(buf1, buf3, primals_2, primals_5, buf4, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_5
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf4, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = buf5; del buf5 # reuse
buf8 = buf7; del buf7 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_1, conv2d_3, g_1, h2, add], Original ATen: [aten.convolution, aten.sigmoid, aten.mul, aten.add]
triton_poi_fused_add_convolution_mul_sigmoid_1.run(buf6, buf8, primals_7, primals_9, primals_3, buf9, 256, grid=grid(256), stream=stream0)
del primals_7
del primals_9
return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf4, buf6, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn as nn
class GatedConv2d(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride,
padding, dilation=1, activation=None):
super(GatedConv2d, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
self.g = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
def forward(self, x):
if self.activation is None:
h = self.h(x)
else:
h = self.activation(self.h(x))
g = self.sigmoid(self.g(x))
return h * g
class GatedResUnit(nn.Module):
def __init__(self, input_channels, activation=None):
super(GatedResUnit, self).__init__()
self.activation = activation
self.conv1 = GatedConv2d(input_channels, input_channels, 3, 1, 1, 1,
activation=activation)
self.conv2 = GatedConv2d(input_channels, input_channels, 3, 1, 1, 1,
activation=activation)
def forward(self, x):
h1 = self.conv1(x)
h2 = self.conv2(h1)
return h2 + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4}]
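# Minimal eager smoke test (an illustrative sketch, not part of the original repo):
if __name__ == "__main__":
    unit = GatedResUnit(*get_init_inputs()[0], **get_init_inputs()[1])
    out = unit(*get_inputs())
    # 3x3 convs with stride 1 and padding 1 preserve the spatial size
    assert out.shape == (4, 4, 4, 4)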
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(in_out_ptr1 + x3, tmp5, xmask)
tl.store(out_ptr0 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_sigmoid_1(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tmp9 = tmp7 + tmp8
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(in_out_ptr1 + x3, tmp5, xmask)
tl.store(out_ptr0 + x3, tmp9, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0[grid(256)](buf1, buf3,
primals_2, primals_5, buf4, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
del primals_5
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = extern_kernels.convolution(buf4, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = buf5
del buf5
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_mul_sigmoid_1[grid(256)](buf6,
buf8, primals_7, primals_9, primals_3, buf9, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
del primals_9
return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf3, buf4, buf6, buf8)
class GatedConv2d(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride,
padding, dilation=1, activation=None):
super(GatedConv2d, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
self.g = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
def forward(self, x):
if self.activation is None:
h = self.h(x)
else:
h = self.activation(self.h(x))
g = self.sigmoid(self.g(x))
return h * g
class GatedResUnitNew(nn.Module):
def __init__(self, input_channels, activation=None):
super(GatedResUnitNew, self).__init__()
self.activation = activation
self.conv1 = GatedConv2d(input_channels, input_channels, 3, 1, 1, 1,
activation=activation)
self.conv2 = GatedConv2d(input_channels, input_channels, 3, 1, 1, 1,
activation=activation)
def forward(self, input_0):
primals_1 = self.conv1.h.weight
primals_2 = self.conv1.h.bias
primals_4 = self.conv1.g.weight
primals_5 = self.conv1.g.bias
primals_6 = self.conv2.h.weight
primals_7 = self.conv2.h.bias
primals_8 = self.conv2.g.weight
primals_9 = self.conv2.g.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| sanghiad/vae_vampprior | GatedResUnit | false | 16364 | ["MIT"] | 218 | d24bc0c8781b7ee7b9570c2d560e43bceff50da4 | https://github.com/sanghiad/vae_vampprior/tree/d24bc0c8781b7ee7b9570c2d560e43bceff50da4 |
GaussianLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xo/cxobgsd77zen2bgj6dyyb7jt6nnnvsqf7diciywdc7qeuwoug2zo.py
# Topologically Sorted Source Nodes: [log, term1, sub, pow_1, mul, term2, add, mean_1], Original ATen: [aten.log, aten.div, aten.sub, aten.pow, aten.mul, aten.add, aten.mean]
# Source node to ATen node mapping:
# add => add
# log => log
# mean_1 => mean
# mul => mul
# pow_1 => pow_1
# sub => sub
# term1 => div
# term2 => div_1
# Graph fragment:
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%slice_4,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %slice_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_4, 2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_1, %mul), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add,), kwargs = {})
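# Summary (derived from the graph fragment above): with mu = yhat[:, :d]
# (slice_2) and var = yhat[:, d:] (slice_4), this fused reduction computes
#   mean(log(var) / 2 + (y - mu)^2 / (2 * var))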
triton_per_fused_add_div_log_mean_mul_pow_sub_0 = async_compile.triton('triton_per_fused_add_div_log_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_log_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 32
r1 = (rindex // 32)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp4 = tl.load(in_ptr1 + (r2), None)
tmp5 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl_math.log(tmp0)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = 2.0
tmp9 = tmp0 * tmp8
tmp10 = tmp7 / tmp9
tmp11 = tmp3 + tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.sum(tmp12, 1)[:, None]
tmp15 = 128.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 2, 4, 4), (32, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [log, term1, sub, pow_1, mul, term2, add, mean_1], Original ATen: [aten.log, aten.div, aten.sub, aten.pow, aten.mul, aten.add, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_log_mean_mul_pow_sub_0.run(buf1, arg0_1, arg1_1, 1, 128, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 2, 4, 4), (32, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class GaussianLoss(torch.nn.Module):
"""
Gaussian log-likelihood loss. It assumes targets `y` with n rows and d
columns, but estimates `yhat` with n rows and 2d columns. The columns 0:d
of `yhat` contain estimated means, the columns d:2*d of `yhat` contain
estimated variances. This module assumes that the estimated variances are
positive---for numerical stability, it is recommended that the minimum
estimated variance is greater than a small number (1e-3).
"""
def __init__(self):
super(GaussianLoss, self).__init__()
def forward(self, yhat, y):
dim = yhat.size(1) // 2
mean = yhat[:, :dim]
variance = yhat[:, dim:]
term1 = variance.log().div(2)
term2 = (y - mean).pow(2).div(variance.mul(2))
return (term1 + term2).mean()
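# Up to the additive constant log(2*pi)/2 per element, term1 + term2 is the
# negative log-likelihood of y under N(mean, variance), so minimizing this
# loss fits the predicted means and variances jointly.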
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 2, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_log_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 32
r1 = rindex // 32
r2 = rindex
tmp0 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr1 + r2, None)
tmp5 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl_math.log(tmp0)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = 2.0
tmp9 = tmp0 * tmp8
tmp10 = tmp7 / tmp9
tmp11 = tmp3 + tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.sum(tmp12, 1)[:, None]
tmp15 = 128.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 2, 4, 4), (32, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_log_mean_mul_pow_sub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 128, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class GaussianLossNew(torch.nn.Module):
"""
Gaussian log-likelihood loss. It assumes targets `y` with n rows and d
columns, but estimates `yhat` with n rows and 2d columns. The columns 0:d
of `yhat` contain estimated means, the columns d:2*d of `yhat` contain
estimated variances. This module assumes that the estimated variances are
positive---for numerical stability, it is recommended that the minimum
estimated variance is greater than a small number (1e-3).
"""
def __init__(self):
super(GaussianLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| scottgigante-immunai/CPA | GaussianLoss | false | 16365 | ["MIT"] | 132 | 9338ede503d36c6163a521bee904aa93d896ef92 | https://github.com/scottgigante-immunai/CPA/tree/9338ede503d36c6163a521bee904aa93d896ef92 |
FeedForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
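# Summary (derived from the graph fragment above): per group of 4 contiguous
# elements (the last dim), this kernel computes the mean and rsqrt(var + 1e-05)
# that LayerNorm needs; the affine scale and shift are applied by the next kernel.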
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
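# Summary (derived from the graph fragment above): applies the LayerNorm
# affine step: out = (x - mean) * rsqrt(var + eps) * weight + bias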
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gm/cgmflgdlpeeb52xctoa47uvw47ycyf7ahlj5wdscxdatpbwcboco.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
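# Summary: alongside the ReLU output, this kernel stores the boolean mask
# relu(x) <= 0 (buf7 in call() below), which the autograd backward pass uses
# to zero gradients where the activation saturated.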
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/b5/cb5kfengtwtso2cktophcv2j56lrz4uz52si4koheb5liewfkvtt.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_3), kwargs = {})
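# Summary (derived from the graph fragment above): fuses the linear2 bias add
# with the residual connection: out = src + (W2h + b2)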
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf7, 256, grid=grid(256), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf6, primals_3, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_6, buf7, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.cuda
import torch.distributed
class FeedForward(torch.nn.Module):
def __init__(self, input_size, hidden_size, dropout):
super().__init__()
self.linear1 = torch.nn.Linear(input_size, hidden_size)
self.linear2 = torch.nn.Linear(hidden_size, input_size)
self.dropout = torch.nn.Dropout(dropout)
self.norm = torch.nn.LayerNorm(input_size)
def forward(self, src):
ret = self.linear1(self.norm(src))
ret = self.linear2(self.dropout(torch.nn.functional.relu(ret)))
return src + self.dropout(ret)
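    # In formula form (biases omitted):
    #   out = src + Dropout(W2 Dropout(ReLU(W1 LayerNorm(src))))
    # i.e. a pre-norm Transformer feed-forward sublayer with a residual connection.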
def update_dropout(self, dropout):
self.dropout.p = dropout
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.cuda
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(256)](buf4,
primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_3[grid(256)](buf6, primals_3, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), reinterpret_tensor(buf4, (64, 4), (4, 1), 0
), primals_6, buf7, primals_4
class FeedForwardNew(torch.nn.Module):
def __init__(self, input_size, hidden_size, dropout):
super().__init__()
self.linear1 = torch.nn.Linear(input_size, hidden_size)
self.linear2 = torch.nn.Linear(hidden_size, input_size)
self.dropout = torch.nn.Dropout(dropout)
self.norm = torch.nn.LayerNorm(input_size)
def update_dropout(self, dropout):
self.dropout.p = dropout
def forward(self, input_0):
        # map parameters to the positions call() expects: primals_1/primals_2
        # feed the LayerNorm affine transform, primals_5 and primals_7 are the
        # linear biases
        primals_4 = self.linear1.weight
        primals_5 = self.linear1.bias
        primals_6 = self.linear2.weight
        primals_7 = self.linear2.bias
        primals_1 = self.norm.weight
        primals_2 = self.norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| sakrnference/data-to-text-hierarchical | FeedForward | false | 16366 | ["Apache-2.0"] | 82 | 09b8fa8bf85385f25348378a30e830d425c93db3 | https://github.com/sakrnference/data-to-text-hierarchical/tree/09b8fa8bf85385f25348378a30e830d425c93db3 |
NBLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/p3/cp3hqebbduk3x6luzssht64cs2nkmq4l7ssi2r54szgrylodxxc5.py
# Topologically Sorted Source Nodes: [add, lgamma, add_1, lgamma_1, add_2, add_3, add_4, lgamma_2, t1, add_5, add_6, truediv, add_7, log, mul, add_8, log_1, add_9, log_2, sub_1, mul_1, t2, final, isnan, add_12, final_1, mean], Original ATen: [aten.add, aten.lgamma, aten.sub, aten.div, aten.log, aten.mul, aten.isnan, aten.where, aten.mean]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_12 => full_default
# add_2 => add_2
# add_3 => add_3
# add_4 => add_4
# add_5 => add_5
# add_6 => add_6
# add_7 => add_7
# add_8 => add_8
# add_9 => add_9
# final => add_11
# final_1 => where
# isnan => isnan
# lgamma => lgamma
# lgamma_1 => lgamma_1
# lgamma_2 => lgamma_2
# log => log
# log_1 => log_1
# log_2 => log_2
# mean => mean
# mul => mul
# mul_1 => mul_1
# sub_1 => sub_1
# t1 => sub
# t2 => add_10
# truediv => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, 1e-08), kwargs = {})
# %lgamma : [num_users=1] = call_function[target=torch.ops.aten.lgamma.default](args = (%add,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 1.0), kwargs = {})
# %lgamma_1 : [num_users=1] = call_function[target=torch.ops.aten.lgamma.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%lgamma, %lgamma_1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %slice_4), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, 1e-08), kwargs = {})
# %lgamma_2 : [num_users=1] = call_function[target=torch.ops.aten.lgamma.default](args = (%add_4,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %lgamma_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, %arg1_1), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, 1e-08), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_2, %add_6), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1.0), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_7,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, %log), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, 1e-08), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_8,), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_2, 1e-08), kwargs = {})
# %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_9,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_1, %log_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %sub_1), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %add_10), kwargs = {})
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%add_11,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 2, 4, 4], inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default, %add_11), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {})
triton_per_fused_add_div_isnan_lgamma_log_mean_mul_sub_where_0 = async_compile.triton('triton_per_fused_add_div_isnan_lgamma_log_mean_mul_sub_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_isnan_lgamma_log_mean_mul_sub_where_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_isnan_lgamma_log_mean_mul_sub_where_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 32
r1 = (rindex // 32)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp4 = tl.load(in_ptr1 + (r2), None)
tmp14 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = libdevice.lgamma(tmp2)
tmp5 = 1.0
tmp6 = tmp4 + tmp5
tmp7 = libdevice.lgamma(tmp6)
tmp8 = tmp3 + tmp7
tmp9 = tmp4 + tmp0
tmp10 = tmp9 + tmp1
tmp11 = libdevice.lgamma(tmp10)
tmp12 = tmp8 - tmp11
tmp13 = tmp0 + tmp4
tmp15 = tmp14 / tmp2
tmp16 = tmp15 + tmp5
tmp17 = tl_math.log(tmp16)
tmp18 = tmp13 * tmp17
tmp19 = tl_math.log(tmp2)
tmp20 = tmp14 + tmp1
tmp21 = tl_math.log(tmp20)
tmp22 = tmp19 - tmp21
tmp23 = tmp4 * tmp22
tmp24 = tmp18 + tmp23
tmp25 = tmp12 + tmp24
tmp26 = libdevice.isnan(tmp25).to(tl.int1)
tmp27 = float("inf")
tmp28 = tl.where(tmp26, tmp27, tmp25)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp31 = tl.sum(tmp29, 1)[:, None]
tmp32 = 128.0
tmp33 = tmp31 / tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp33, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 2, 4, 4), (32, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add, lgamma, add_1, lgamma_1, add_2, add_3, add_4, lgamma_2, t1, add_5, add_6, truediv, add_7, log, mul, add_8, log_1, add_9, log_2, sub_1, mul_1, t2, final, isnan, add_12, final_1, mean], Original ATen: [aten.add, aten.lgamma, aten.sub, aten.div, aten.log, aten.mul, aten.isnan, aten.where, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_isnan_lgamma_log_mean_mul_sub_where_0.run(buf1, arg0_1, arg1_1, 1, 128, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
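# Illustrative cross-check, not part of the generated output: the fused kernel
# above evaluates, per element,
#     lgamma(theta + eps) + lgamma(y + 1) - lgamma(y + theta + eps)
#     + (theta + y) * log(1 + mu / (theta + eps))
#     + y * (log(theta + eps) - log(mu + eps))
# with mu = yhat[:, :2] and theta = yhat[:, 2:], replaces NaNs with +inf, and
# takes the mean over all 4*2*4*4 = 128 elements. A minimal sketch comparing
# call() against eager PyTorch (assumes a CUDA device; `_check_against_eager`
# is a hypothetical helper, not generated by inductor):
def _check_against_eager(atol=1e-05):
    yhat = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 2, 4, 4, device='cuda')
    mu, theta, eps = yhat[:, :2], yhat[:, 2:], 1e-08
    t1 = torch.lgamma(theta + eps) + torch.lgamma(y + 1.0) - torch.lgamma(y + theta + eps)
    t2 = (theta + y) * torch.log(1.0 + mu / (theta + eps)) + y * (torch.log(theta + eps) - torch.log(mu + eps))
    final = t1 + t2
    expected = torch.where(torch.isnan(final), torch.full_like(final, float('inf')), final).mean()
    actual, = call([yhat, y])
    assert torch.allclose(actual, expected, atol=atol)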
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 2, 4, 4), (32, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
def _nan2inf(x):
return torch.where(torch.isnan(x), torch.zeros_like(x) + np.inf, x)
class NBLoss(torch.nn.Module):
def __init__(self):
super(NBLoss, self).__init__()
def forward(self, yhat, y, eps=1e-08):
"""Negative binomial log-likelihood loss. It assumes targets `y` with n
rows and d columns, but estimates `yhat` with n rows and 2d columns.
        The columns 0:d of `yhat` contain estimated means, and the columns d:2*d
        of `yhat` contain estimated inverse dispersions. This module assumes that
        the estimated mean and inverse dispersion are positive; for numerical
        stability, it is recommended that the minimum estimated inverse
        dispersion be greater than a small number (1e-3).
Parameters
----------
yhat: Tensor
            Torch Tensor of reconstructed data.
y: Tensor
Torch Tensor of ground truth data.
eps: Float
numerical stability constant.
"""
dim = yhat.size(1) // 2
mu = yhat[:, :dim]
theta = yhat[:, dim:]
if theta.ndimension() == 1:
theta = theta.view(1, theta.size(0))
t1 = torch.lgamma(theta + eps) + torch.lgamma(y + 1.0) - torch.lgamma(
y + theta + eps)
t2 = (theta + y) * torch.log(1.0 + mu / (theta + eps)) + y * (torch
.log(theta + eps) - torch.log(mu + eps))
final = t1 + t2
final = _nan2inf(final)
return torch.mean(final)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 2, 4, 4])]
def get_init_inputs():
return [[], {}]
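# Illustrative usage sketch (shapes mirror get_inputs() above; `_example_nbloss`
# is a hypothetical helper, not part of the original module):
def _example_nbloss():
    loss_fn = NBLoss()
    yhat = torch.rand(4, 4, 4, 4)  # channels 0:2 hold means, 2:4 inverse dispersions
    y = torch.rand(4, 2, 4, 4)     # ground-truth targets
    return loss_fn(yhat, y)        # scalar mean NB negative log-likelihood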
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_isnan_lgamma_log_mean_mul_sub_where_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
    # The unused x-dimension index and mask computations survive here only as
    # bare no-op expressions, so they are dropped: the kernel is launched with
    # grid(1), meaning a single program covers the whole 128-element reduction
    # and no masking is required.
    rindex = tl.arange(0, RBLOCK)[None, :]
r0 = rindex % 32
r1 = rindex // 32
r2 = rindex
tmp0 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr1 + r2, None)
tmp14 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = libdevice.lgamma(tmp2)
tmp5 = 1.0
tmp6 = tmp4 + tmp5
tmp7 = libdevice.lgamma(tmp6)
tmp8 = tmp3 + tmp7
tmp9 = tmp4 + tmp0
tmp10 = tmp9 + tmp1
tmp11 = libdevice.lgamma(tmp10)
tmp12 = tmp8 - tmp11
tmp13 = tmp0 + tmp4
tmp15 = tmp14 / tmp2
tmp16 = tmp15 + tmp5
tmp17 = tl_math.log(tmp16)
tmp18 = tmp13 * tmp17
tmp19 = tl_math.log(tmp2)
tmp20 = tmp14 + tmp1
tmp21 = tl_math.log(tmp20)
tmp22 = tmp19 - tmp21
tmp23 = tmp4 * tmp22
tmp24 = tmp18 + tmp23
tmp25 = tmp12 + tmp24
tmp26 = libdevice.isnan(tmp25).to(tl.int1)
tmp27 = float('inf')
tmp28 = tl.where(tmp26, tmp27, tmp25)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp31 = tl.sum(tmp29, 1)[:, None]
tmp32 = 128.0
tmp33 = tmp31 / tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp33, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 2, 4, 4), (32, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
        get_raw_stream(0)  # stream handle unused; the launch below runs on the current stream
triton_per_fused_add_div_isnan_lgamma_log_mean_mul_sub_where_0[grid(1)
](buf1, arg0_1, arg1_1, 1, 128, XBLOCK=1, num_warps=2, num_stages=1
)
del arg0_1
del arg1_1
return buf1,
def _nan2inf(x):
return torch.where(torch.isnan(x), torch.zeros_like(x) + np.inf, x)
class NBLossNew(torch.nn.Module):
def __init__(self):
super(NBLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
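# Minimal smoke-test sketch (hypothetical helper, not part of the generated
# module; assumes a CUDA device, since call() allocates and launches on cuda:0):
def _example_nbloss_new():
    m = NBLossNew()
    yhat = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 2, 4, 4, device='cuda')
    return m(yhat, y)  # 0-dim loss tensor on cuda:0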
| scottgigante-immunai/CPA | NBLoss | false | 16,367 | [
"MIT"
] | 132 | 9338ede503d36c6163a521bee904aa93d896ef92 | https://github.com/scottgigante-immunai/CPA/tree/9338ede503d36c6163a521bee904aa93d896ef92 |
VGG_16 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/f7/cf7tayhctr3m6ezk7xezotpdlc5h4drokdkbz4vy2pfkbdxnmn4q.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
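# Illustrative sketch, not part of the generated output: triton_poi_fused_0 is
# a pure layout change. From the index arithmetic (load at x2 + 9*y3 with
# y0 = y3 % 3, y1 = y3 // 3; store at y0 + 3*x2 + 27*y1), it copies the
# contiguous (64, 3, 3, 3) OIHW weight of the first convolution into
# channels-last order. An eager-mode equivalent inferred from the indices:
def _reference_first_weight_relayout(w):
    # w: contiguous (64, 3, 3, 3) OIHW conv weight
    return w.permute(0, 2, 3, 1).contiguous()  # bytes ordered O, H, W, I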
# kernel path: runs/run_shard_0/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
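# Illustrative sketch, not part of the generated output: triton_poi_fused_1
# applies the same relayout to the network input, copying the (4, 3, 64, 64)
# NCHW activation into NHWC (channels-last) memory; triton_poi_fused_2 through
# triton_poi_fused_8 below repeat the weight relayout for the deeper
# convolution weights. An eager-mode equivalent for the input:
def _reference_input_relayout(x):
    # x: contiguous (4, 3, 64, 64) NCHW input
    return x.contiguous(memory_format=torch.channels_last)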
# kernel path: runs/run_shard_0/inductor_cache/xq/cxq75w43anllid5ys7ss3yyizuoeph3vvaqlvm5lo434hrywtyle.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/32/c32xiwptfqtyhbnde262mvq5tzywzo6zquurttkv7sztqnze6yni.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jj/cjjz4tpbucpuc3faa2ky32crfwhb5fbnssd6o2yfkgdcjg2acfmo.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tg/ctgdsxjd3rciejxtjvi3y2w5fmmggh5lm3mivuygvkdzeb3zulmc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/e7/ce7jqsdrj5poslb2hpufqd2wdux5xiab5n2auqal3ztzvkzrmnzl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_7 = async_compile.triton('triton_poi_fused_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 131072
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ks/ckso6iiq5yfqfxmx7ilr6ufrmz6mlkiy75pexzhyf3ierq4pu3zl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 262144
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rv/crv3uzu52jbc4u62gio2klk6cj5xhjt7yazr75tq67kvtteddsn5.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
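# Illustrative sketch, not part of the generated output: the convolution
# itself runs in an extern kernel, so this Triton kernel is only the fused
# epilogue; it adds the per-channel bias (xindex % 64 selects the channel in
# channels-last memory) and applies ReLU in place. An eager equivalent on the
# logical NCHW tensor:
def _reference_bias_relu(conv_out, bias):
    # conv_out: (N, 64, H, W) convolution output without bias; bias: (64,)
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))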
# kernel path: runs/run_shard_0/inductor_cache/ku/ckuscrbyawdttbdara4zmhmq3lgm6lvxmizlt7j4v446lfogr7ah.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = (xindex // 64) % 32
x2 = (xindex // 2048)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (8192*x2)), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (8192*x2)), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + (128*x1) + (8192*x2)), None)
tmp5 = tl.load(in_ptr0 + (4160 + x0 + (128*x1) + (8192*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
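# Illustrative sketch, not part of the generated output: this kernel performs
# 2x2, stride-2 max pooling over the channels-last activation and, next to
# each max, stores which of the four window positions won as an int8 in
# {0, 1, 2, 3} (top-left, top-right, bottom-left, bottom-right). Eager
# PyTorch computes the same pooling as below, though return_indices yields
# flat spatial indices rather than the 0-3 window offsets recorded here:
def _reference_maxpool_2x2(x):
    import torch.nn.functional as F
    # x: (4, 64, 64, 64) activation from the previous ReLU
    return F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)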
# kernel path: runs/run_shard_0/inductor_cache/ws/cwssgzseoqxwmttgkoxdmvdzcrtg4ars5flpnsa2at2qixzwygfj.py
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_3 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/a5/ca5aycvivtwycqu7yn2xzgnljbqetxezkymwgte32n4b4c3doezm.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128) % 16
x2 = (xindex // 2048)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1) + (8192*x2)), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (8192*x2)), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + (256*x1) + (8192*x2)), None)
tmp5 = tl.load(in_ptr0 + (4224 + x0 + (256*x1) + (8192*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/c3/cc36sjgk3au3ve2witr7srumjy6npsyym5bconvmq65prldokmso.py
# Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_6 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_13 = async_compile.triton('triton_poi_fused_convolution_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/l4/cl4n5dxp5ry2ji6m3g5uyniuwrai22ts6qhsulpbeng2mhu4ibj7.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_9 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_14 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_14(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256) % 8
x2 = (xindex // 2048)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (8192*x2)), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (8192*x2)), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + (512*x1) + (8192*x2)), None)
tmp5 = tl.load(in_ptr0 + (4352 + x0 + (512*x1) + (8192*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bo/cbory36nvcjc37vmkyigprzjn5qrg2tdk4ivdkunxl3icdtgur5z.py
# Topologically Sorted Source Nodes: [conv2d_7, x_10], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# x_10 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
triton_poi_fused_convolution_relu_15 = async_compile.triton('triton_poi_fused_convolution_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mf/cmfur3mmxa6x6i4nd7emjcnrjpgjj6myhwsch4rls3kx4vdzsyc5.py
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_13 => getitem_6, getitem_7
# Graph fragment:
# %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_16 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_16(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 512
x1 = (xindex // 512) % 4
x2 = (xindex // 2048)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (1024*x1) + (8192*x2)), None)
tmp1 = tl.load(in_ptr0 + (512 + x0 + (1024*x1) + (8192*x2)), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + (1024*x1) + (8192*x2)), None)
tmp5 = tl.load(in_ptr0 + (4608 + x0 + (1024*x1) + (8192*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/if/cifuazosgglhb63vtzrpxidmty7j5hpnqulfi4w5cefgewy4woma.py
# Topologically Sorted Source Nodes: [conv2d_10, x_14], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_10 => convolution_10
# x_14 => relu_10
# Graph fragment:
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {})
triton_poi_fused_convolution_relu_17 = async_compile.triton('triton_poi_fused_convolution_relu_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
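# triton_poi_fused_convolution_relu_17 is only the epilogue of the fused op:
# the convolution itself runs through extern_kernels.convolution with
# bias=None, and this kernel adds the per-channel bias and applies ReLU in
# place. A rough eager-mode equivalent (an assumed sketch, not generated
# code):
def _bias_relu_sketch(conv_out, bias):
    # conv_out: (N, C, H, W) output of a bias-free convolution; bias: (C,).
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))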
# kernel path: runs/run_shard_0/inductor_cache/q3/cq3pdtgopvnf6ycuq4bs4t7mjppryujt3u3atlffj7p4pjkfne3u.py
# Topologically Sorted Source Nodes: [conv2d_12, x_16], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_12 => convolution_12
# x_16 => relu_12
# Graph fragment:
# %convolution_12 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_11, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_12 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_12,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_12, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_18 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 512
y1 = (yindex // 512)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (512*x2) + (8192*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (512*x2) + (8192*y1)), tmp6, xmask)
''', device_str='cuda')
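# Besides the final ReLU output, the kernel above emits the boolean mask
# (relu_out <= 0) that aten.threshold_backward consumes; inductor saves it
# (buf48 below) so the backward pass can gate the incoming gradient without
# recomputing the forward comparison. A minimal sketch of that use (assumed
# illustration):
def _relu_backward_sketch(grad_output, le_mask):
    # Zero the gradient wherever the forward ReLU clamped its input to zero.
    return torch.where(le_mask, torch.zeros_like(grad_output), grad_output)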
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512, ), (1, ))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512, ), (1, ))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512, ), (1, ))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 192, 9, grid=grid(192, 9), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 4096, 9, grid=grid(4096, 9), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0)
del primals_8
buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_10, buf5, 32768, 9, grid=grid(32768, 9), stream=stream0)
del primals_10
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_12, buf6, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_12
buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_14, buf7, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_14
buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_7.run(primals_16, buf8, 131072, 9, grid=grid(131072, 9), stream=stream0)
del primals_16
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_18, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_18
buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_20, buf10, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_20
buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_22, buf11, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_22
buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_24, buf12, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_24
buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_26, buf13, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_26
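        # At this point buf0..buf13 hold every conv weight (and buf1 the
        # network input) repacked into channels-last-style layouts, and the
        # original primals_* tensors have been freed; all convolutions below
        # therefore run on contiguous channels-last operands.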
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf15, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf17, primals_5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32)
buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_10.run(buf17, buf18, buf19, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf18, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf21, primals_7, 524288, grid=grid(524288), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf23, primals_9, 524288, grid=grid(524288), stream=stream0)
del primals_9
buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32)
buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf23, buf24, buf25, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf24, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_13.run(buf27, primals_11, 262144, grid=grid(262144), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf29 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_13.run(buf29, primals_13, 262144, grid=grid(262144), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [conv2d_6, x_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_13.run(buf31, primals_15, 262144, grid=grid(262144), stream=stream0)
del primals_15
buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32)
buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_14.run(buf31, buf32, buf33, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf32, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [conv2d_7, x_10], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_15.run(buf35, primals_17, 131072, grid=grid(131072), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf35, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf37 = buf36; del buf36 # reuse
# Topologically Sorted Source Nodes: [conv2d_8, x_11], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_15.run(buf37, primals_19, 131072, grid=grid(131072), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf39 = buf38; del buf38 # reuse
# Topologically Sorted Source Nodes: [conv2d_9, x_12], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_15.run(buf39, primals_21, 131072, grid=grid(131072), stream=stream0)
del primals_21
buf40 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.float32)
buf41 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.int8)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_16.run(buf39, buf40, buf41, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf40, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 512, 4, 4), (8192, 1, 2048, 512))
buf43 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [conv2d_10, x_14], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_17.run(buf43, primals_23, 32768, grid=grid(32768), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf44 = extern_kernels.convolution(buf43, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 512, 4, 4), (8192, 1, 2048, 512))
buf45 = buf44; del buf44 # reuse
# Topologically Sorted Source Nodes: [conv2d_11, x_15], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_17.run(buf45, primals_25, 32768, grid=grid(32768), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf45, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 512, 4, 4), (8192, 1, 2048, 512))
buf47 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32)
buf48 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_12, x_16], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_18.run(buf46, primals_27, buf47, buf48, 2048, 16, grid=grid(2048, 16), stream=stream0)
del buf46
del primals_27
return (buf47, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33, buf35, buf37, buf39, buf40, buf41, buf43, buf45, buf48, )
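# Note that call() returns buf47 (the final activation, repacked back to a
# contiguous NCHW layout) followed by the tensors autograd needs for the
# backward pass: the repacked weights, the post-ReLU activations, the int8
# max-pool index maps (buf19/buf25/buf33/buf41), and the threshold mask
# buf48.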
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class VGG_16(nn.Module):
"""
VGG-16 without pooling layer before fc layer
"""
def __init__(self):
super(VGG_16, self).__init__()
self.convolution1_1 = nn.Conv2d(3, 64, 3, padding=1)
self.convolution1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.pooling1 = nn.MaxPool2d(2, stride=2)
self.convolution2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.convolution2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.pooling2 = nn.MaxPool2d(2, stride=2)
self.convolution3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.convolution3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.convolution3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.pooling3 = nn.MaxPool2d(2, stride=2)
self.convolution4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.convolution4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.convolution4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.pooling4 = nn.MaxPool2d(2, stride=2)
self.convolution5_1 = nn.Conv2d(512, 512, 3, padding=1)
self.convolution5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.convolution5_3 = nn.Conv2d(512, 512, 3, padding=1)
def forward(self, x):
x = F.relu(self.convolution1_1(x), inplace=True)
x = F.relu(self.convolution1_2(x), inplace=True)
x = self.pooling1(x)
x = F.relu(self.convolution2_1(x), inplace=True)
x = F.relu(self.convolution2_2(x), inplace=True)
x = self.pooling2(x)
x = F.relu(self.convolution3_1(x), inplace=True)
x = F.relu(self.convolution3_2(x), inplace=True)
x = F.relu(self.convolution3_3(x), inplace=True)
x = self.pooling3(x)
x = F.relu(self.convolution4_1(x), inplace=True)
x = F.relu(self.convolution4_2(x), inplace=True)
x = F.relu(self.convolution4_3(x), inplace=True)
x = self.pooling4(x)
x = F.relu(self.convolution5_1(x), inplace=True)
x = F.relu(self.convolution5_2(x), inplace=True)
x = F.relu(self.convolution5_3(x), inplace=True)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
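# A hedged usage sketch (an assumed illustration, not from the source repo):
# four stride-2 pools reduce the 64x64 input to 4x4, so the backbone output
# for the sample batch is (4, 512, 4, 4).
def _demo_vgg16_sketch():
    model = VGG_16().eval()
    x, = get_inputs()
    with torch.no_grad():
        y = model(x)
    assert y.shape == (4, 512, 4, 4)
    return y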
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
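# Unlike its neighbours, triton_poi_fused_1 repacks the network input rather
# than a weight: it converts the (4, 3, 64, 64) activation from NCHW strides
# (12288, 4096, 64, 1) to channels-last strides (12288, 1, 192, 3), so the
# entire forward pass can stay in a channels-last layout.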
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
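# triton_poi_fused_0 through triton_poi_fused_8 all do the same job at
# different channel counts: they transpose conv weights from the contiguous
# (out_ch, in_ch, kH, kW) layout into one whose innermost stride walks the
# input channels, matching the channels-last operands that the extern
# convolutions in call() expect.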
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 32
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None)
tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
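# Index arithmetic for the channels-last pooling above (assumed
# decomposition): with input strides (262144, 1, 4096, 64) for (N, C, H, W),
# x0 selects the channel, 128 * x1 jumps two W positions (2 * 64), and
# 8192 * x2 advances two H rows at a time; the +64, +4096 and +4160 offsets
# then reach the (h, w+1), (h+1, w) and (h+1, w+1) neighbours of each 2x2
# window.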
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 16
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None)
tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_14(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256 % 8
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None)
tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_16(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 512
x1 = xindex // 512 % 4
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 8192 * x2), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + 1024 * x1 + 8192 * x2), None)
tmp5 = tl.load(in_ptr0 + (4608 + x0 + 1024 * x1 + 8192 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 512
y1 = yindex // 512
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 8192 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 512 * x2 + 8192 * y1), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_22
buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_24
buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_26
buf14 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_9[grid(1048576)](buf15, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_9[grid(1048576)](buf17, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.float32)
buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(262144)](buf17,
buf18, buf19, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf20 = extern_kernels.convolution(buf18, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_11[grid(524288)](buf21, primals_7,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_11[grid(524288)](buf23, primals_9,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
torch.float32)
buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_12[grid(131072)](buf23,
buf24, buf25, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf26 = extern_kernels.convolution(buf24, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_13[grid(262144)](buf27,
primals_11, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_13[grid(262144)](buf29,
primals_13, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
buf30 = extern_kernels.convolution(buf29, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_13[grid(262144)](buf31,
primals_15, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_15
buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
torch.float32)
buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_14[grid(65536)](buf31,
buf32, buf33, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf34 = extern_kernels.convolution(buf32, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf35 = buf34
del buf34
triton_poi_fused_convolution_relu_15[grid(131072)](buf35,
primals_17, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf36 = extern_kernels.convolution(buf35, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf37 = buf36
del buf36
triton_poi_fused_convolution_relu_15[grid(131072)](buf37,
primals_19, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_19
buf38 = extern_kernels.convolution(buf37, buf10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_15[grid(131072)](buf39,
primals_21, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_21
buf40 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
torch.float32)
buf41 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_16[grid(32768)](buf39,
buf40, buf41, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf42 = extern_kernels.convolution(buf40, buf11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 512, 4, 4), (8192, 1, 2048, 512))
buf43 = buf42
del buf42
triton_poi_fused_convolution_relu_17[grid(32768)](buf43, primals_23,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf44 = extern_kernels.convolution(buf43, buf12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 512, 4, 4), (8192, 1, 2048, 512))
buf45 = buf44
del buf44
triton_poi_fused_convolution_relu_17[grid(32768)](buf45, primals_25,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf46 = extern_kernels.convolution(buf45, buf13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 512, 4, 4), (8192, 1, 2048, 512))
buf47 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.
float32)
buf48 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_18[grid(2048, 16)
](buf46, primals_27, buf47, buf48, 2048, 16, XBLOCK=16, YBLOCK=
64, num_warps=4, num_stages=1)
del buf46
del primals_27
return (buf47, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8,
buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21,
buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33, buf35,
buf37, buf39, buf40, buf41, buf43, buf45, buf48)
class VGG_16New(nn.Module):
"""
VGG-16 without pooling layer before fc layer
"""
def __init__(self):
super(VGG_16New, self).__init__()
self.convolution1_1 = nn.Conv2d(3, 64, 3, padding=1)
self.convolution1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.pooling1 = nn.MaxPool2d(2, stride=2)
self.convolution2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.convolution2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.pooling2 = nn.MaxPool2d(2, stride=2)
self.convolution3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.convolution3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.convolution3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.pooling3 = nn.MaxPool2d(2, stride=2)
self.convolution4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.convolution4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.convolution4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.pooling4 = nn.MaxPool2d(2, stride=2)
self.convolution5_1 = nn.Conv2d(512, 512, 3, padding=1)
self.convolution5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.convolution5_3 = nn.Conv2d(512, 512, 3, padding=1)
def forward(self, input_0):
primals_1 = self.convolution1_1.weight
primals_2 = self.convolution1_1.bias
primals_4 = self.convolution1_2.weight
primals_5 = self.convolution1_2.bias
primals_6 = self.convolution2_1.weight
primals_7 = self.convolution2_1.bias
primals_8 = self.convolution2_2.weight
primals_9 = self.convolution2_2.bias
primals_10 = self.convolution3_1.weight
primals_11 = self.convolution3_1.bias
primals_12 = self.convolution3_2.weight
primals_13 = self.convolution3_2.bias
primals_14 = self.convolution3_3.weight
primals_15 = self.convolution3_3.bias
primals_16 = self.convolution4_1.weight
primals_17 = self.convolution4_1.bias
primals_18 = self.convolution4_2.weight
primals_19 = self.convolution4_2.bias
primals_20 = self.convolution4_3.weight
primals_21 = self.convolution4_3.bias
primals_22 = self.convolution5_1.weight
primals_23 = self.convolution5_1.bias
primals_24 = self.convolution5_2.weight
primals_25 = self.convolution5_2.bias
primals_26 = self.convolution5_3.weight
primals_27 = self.convolution5_3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27])
return output[0]
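# A hedged smoke-test sketch (assumed; needs a CUDA device, since call()
# pins all buffers to cuda:0 and asserts contiguous NCHW input strides):
def _demo_vgg16new_sketch():
    model = VGG_16New().cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    y = model(x)
    assert y.shape == (4, 512, 4, 4)
    return y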
| qiu9yu/Lets_OCR | VGG_16 | false | 16,368 | ["MIT"] | 671 | 62d68b044250d02a9d5ac8c4fbd08cec83faa0d1 | https://github.com/qiu9yu/Lets_OCR/tree/62d68b044250d02a9d5ac8c4fbd08cec83faa0d1 |
CNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ln/cln6qydwyqg4qnxsbkqnv3hx3efw2a7vyiqfft6cddbhbrgdvyyu.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (36*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5q/c5qrspwskvpybawg5gxji2xsvpkstvwbi7pblvjxn3j2hjj6m6iu.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (3200*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yi/cyiu7hnvvrrmu6u67tnrnbvu5ggcchabkpbfczcoqyl3b43asnnh.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 256
xnumel = 3249
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (3249*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + (64*x2) + (207936*y1)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/q7/cq7oayogltflabzshimf6ms35y47yn42ipnie3be27awgypmq6d7.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 346112
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fa/cfag6r4g25pz77g4rixrcv76giwwqdy3x3c4qlvexfleqbf3dic7.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 512], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 512
xnumel = 484
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 128
y1 = (yindex // 128)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (128*x2) + (61952*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (484*y3)), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + (128*x2) + (61952*y1)), tmp6, xmask & ymask)
''', device_str='cuda')
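# Note: besides the fused bias-add + ReLU, the kernel above also emits the boolean mask relu(x) <= 0 (buf10), which aten.threshold_backward uses to zero gradients in the backward pass.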
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (64, 1, 8, 8), (64, 64, 8, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (128, 64, 6, 6), (2304, 36, 6, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (128, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 2048), (2048, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (10, 128), (128, 1))
assert_size_stride(primals_11, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 64, 6, 6), (2304, 1, 384, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_4, buf0, 8192, 36, grid=grid(8192, 36), stream=stream0)
del primals_4
buf1 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_6, buf1, 16384, 25, grid=grid(16384, 25), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 57, 57), (207936, 3249, 57, 1))
buf3 = empty_strided_cuda((4, 64, 57, 57), (207936, 1, 3648, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf2, primals_2, buf3, 256, 3249, grid=grid(256, 3249), stream=stream0)
del buf2
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 128, 26, 26), (86528, 1, 3328, 128))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf5, primals_5, 346112, grid=grid(346112), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 22, 22), (61952, 1, 2816, 128))
buf7 = empty_strided_cuda((4, 128, 22, 22), (61952, 484, 22, 1), torch.float32)
buf10 = empty_strided_cuda((4, 128, 22, 22), (61952, 1, 2816, 128), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_4.run(buf6, primals_7, buf7, buf10, 512, 484, grid=grid(512, 484), stream=stream0)
del buf6
del primals_7
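        # The (4, 128, 22, 22) activation is viewed as (121, 2048) rows here, matching x.view(-1, 128 * 4 * 4) in the eager model (which was sized for 28x28 inputs).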
buf8 = empty_strided_cuda((121, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf7, (121, 2048), (2048, 1), 0), reinterpret_tensor(primals_8, (2048, 128), (1, 2048), 0), alpha=1, beta=1, out=buf8)
del primals_9
buf9 = empty_strided_cuda((121, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf8, reinterpret_tensor(primals_10, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf9)
del primals_11
return (buf9, primals_1, primals_3, buf0, buf1, buf3, buf5, reinterpret_tensor(buf7, (121, 2048), (2048, 1), 0), buf8, primals_10, primals_8, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 1, 8, 8), (64, 64, 8, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 64, 6, 6), (2304, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class CNN(torch.nn.Module):
"""Basic CNN architecture."""
def __init__(self, in_channels=1):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(in_channels, 64, 8, 1)
self.conv2 = nn.Conv2d(64, 128, 6, 2)
self.conv3 = nn.Conv2d(128, 128, 5, 1)
self.fc1 = nn.Linear(128 * 4 * 4, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(-1, 128 * 4 * 4)
x = self.fc1(x)
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
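# --- Hypothetical usage sketch (not part of the original repo code) ---
# Runs the eager CNN on the shapes from get_inputs(). The flatten
# x.view(-1, 128 * 4 * 4) is sized for 28x28 inputs; a 64x64 input yields
# a (4, 128, 22, 22) feature map, so the view produces 121 rows and the
# output is (121, 10) rather than (4, 10).
def _example_cnn_forward():
    model = CNN(in_channels=1)
    x, = get_inputs()
    return model(x).shape  # torch.Size([121, 10])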
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 256
xnumel = 3249
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 3249 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 64 * x2 + 207936 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 512
xnumel = 484
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 128
y1 = yindex // 128
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 61952 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 484 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 128 * x2 + 61952 * y1), tmp6, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 8, 8), (64, 64, 8, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (128, 64, 6, 6), (2304, 36, 6, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (128, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 2048), (2048, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (10, 128), (128, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 64, 6, 6), (2304, 1, 384, 64),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(8192, 36)](primals_4, buf0, 8192, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128),
torch.float32)
triton_poi_fused_1[grid(16384, 25)](primals_6, buf1, 16384, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf2 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 57, 57), (207936, 3249, 57, 1))
buf3 = empty_strided_cuda((4, 64, 57, 57), (207936, 1, 3648, 64),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(256, 3249)](buf2,
primals_2, buf3, 256, 3249, XBLOCK=256, YBLOCK=16, num_warps=8,
num_stages=1)
del buf2
del primals_2
buf4 = extern_kernels.convolution(buf3, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 128, 26, 26), (86528, 1, 3328, 128))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_3[grid(346112)](buf5, primals_5,
346112, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 22, 22), (61952, 1, 2816, 128))
buf7 = empty_strided_cuda((4, 128, 22, 22), (61952, 484, 22, 1),
torch.float32)
buf10 = empty_strided_cuda((4, 128, 22, 22), (61952, 1, 2816, 128),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_4[grid(512, 484)](
buf6, primals_7, buf7, buf10, 512, 484, XBLOCK=32, YBLOCK=32,
num_warps=4, num_stages=1)
del buf6
del primals_7
buf8 = empty_strided_cuda((121, 128), (128, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf7, (121, 2048
), (2048, 1), 0), reinterpret_tensor(primals_8, (2048, 128), (1,
2048), 0), alpha=1, beta=1, out=buf8)
del primals_9
buf9 = empty_strided_cuda((121, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf8, reinterpret_tensor(
primals_10, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf9)
del primals_11
return (buf9, primals_1, primals_3, buf0, buf1, buf3, buf5,
reinterpret_tensor(buf7, (121, 2048), (2048, 1), 0), buf8,
primals_10, primals_8, buf10)
class CNNNew(torch.nn.Module):
"""Basic CNN architecture."""
def __init__(self, in_channels=1):
super(CNNNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels, 64, 8, 1)
self.conv2 = nn.Conv2d(64, 128, 6, 2)
self.conv3 = nn.Conv2d(128, 128, 5, 1)
self.fc1 = nn.Linear(128 * 4 * 4, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| saumya0303/cleverhans | CNN | false | 16,369 | [
"MIT"
] | 4,333 | 03f3ee254c2a1c4ebd91728263b66ff29e8b4f78 | https://github.com/saumya0303/cleverhans/tree/03f3ee254c2a1c4ebd91728263b66ff29e8b4f78 |
DeConv2dBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yb/cybpjmzbifjzzgcvo732lai5metqpn6lv5qgv2usa3v2ykeb3mq7.py
# Topologically Sorted Source Nodes: [x, x_2], Original ATen: [aten.convolution, aten.silu]
# Source node to ATen node mapping:
# x => convolution
# x_2 => mul, sigmoid
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [2, 2], [1, 1], True, [1, 1], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {})
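# Note: the kernel below stores the bias-added convolution output in place (kept for the backward pass) and writes silu(x) = x * sigmoid(x) to a separate buffer.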
triton_poi_fused_convolution_silu_0 = async_compile.triton('triton_poi_fused_convolution_silu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_silu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_silu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 36) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4f/c4f4mu3fy43wvkdpljn3idkt2qjt7pkweg7fpd4ialsrbzpnr46o.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.silu]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => mul_1, sigmoid_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], True, [1, 1], 1), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, %sigmoid_1), kwargs = {})
triton_poi_fused_convolution_silu_1 = async_compile.triton('triton_poi_fused_convolution_silu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_silu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_silu_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 144) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 6, 6), (144, 36, 6, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_2], Original ATen: [aten.convolution, aten.silu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_silu_0.run(buf1, primals_2, buf2, 576, grid=grid(576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 12, 12), (576, 144, 12, 1))
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 4, 12, 12), (576, 144, 12, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.silu]
triton_poi_fused_convolution_silu_1.run(buf4, primals_5, buf5, 2304, grid=grid(2304), stream=stream0)
del primals_5
return (buf5, primals_1, primals_3, primals_4, buf1, buf2, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class DeConv2dBlock(nn.Module):
"""
Similar to a LeNet block
4x upsampling, dimension hard-coded
"""
def __init__(self, in_dim: 'int', hidden_dim: 'int', out_dim: 'int',
stride: 'int'=2, kernel_size: 'int'=3, padding: 'int'=2,
output_padding: 'int'=1, dropout=0.1, activation_type='silu', debug
=False):
super(DeConv2dBlock, self).__init__()
padding1 = padding // 2 if padding // 2 >= 1 else 1
self.deconv0 = nn.ConvTranspose2d(in_channels=in_dim, out_channels=
hidden_dim, kernel_size=kernel_size, stride=stride,
output_padding=output_padding, padding=padding)
self.deconv1 = nn.ConvTranspose2d(in_channels=hidden_dim,
out_channels=out_dim, kernel_size=kernel_size, stride=stride,
output_padding=output_padding, padding=padding1)
self.activation = nn.SiLU() if activation_type == 'silu' else nn.ReLU()
self.dropout = nn.Dropout(dropout)
self.debug = debug
def forward(self, x):
x = self.deconv0(x)
x = self.dropout(x)
x = self.activation(x)
x = self.deconv1(x)
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'hidden_dim': 4, 'out_dim': 4}]
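# --- Hypothetical usage sketch (not part of the original repo code) ---
# ConvTranspose2d output size (dilation=1):
#   H_out = (H_in - 1)*stride - 2*padding + kernel_size + output_padding
# deconv0 (padding=2): (4 - 1)*2 - 4 + 3 + 1 = 6
# deconv1 (padding=1): (6 - 1)*2 - 2 + 3 + 1 = 12
def _example_deconv_forward():
    block = DeConv2dBlock(in_dim=4, hidden_dim=4, out_dim=4)
    block.eval()  # disable dropout for a deterministic pass
    x, = get_inputs()
    return block(x).shape  # torch.Size([4, 4, 12, 12])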
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_silu_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 36 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_silu_1(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 144 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 6, 6), (144, 36, 6, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_silu_0[grid(576)](buf1, primals_2,
buf2, 576, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 12, 12), (576, 144, 12, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 12, 12), (576, 144, 12, 1), torch.
float32)
triton_poi_fused_convolution_silu_1[grid(2304)](buf4, primals_5,
buf5, 2304, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf5, primals_1, primals_3, primals_4, buf1, buf2, buf4
class DeConv2dBlockNew(nn.Module):
"""
Similar to a LeNet block
4x upsampling, dimension hard-coded
"""
def __init__(self, in_dim: 'int', hidden_dim: 'int', out_dim: 'int',
stride: 'int'=2, kernel_size: 'int'=3, padding: 'int'=2,
output_padding: 'int'=1, dropout=0.1, activation_type='silu', debug
=False):
super(DeConv2dBlockNew, self).__init__()
padding1 = padding // 2 if padding // 2 >= 1 else 1
self.deconv0 = nn.ConvTranspose2d(in_channels=in_dim, out_channels=
hidden_dim, kernel_size=kernel_size, stride=stride,
output_padding=output_padding, padding=padding)
self.deconv1 = nn.ConvTranspose2d(in_channels=hidden_dim,
out_channels=out_dim, kernel_size=kernel_size, stride=stride,
output_padding=output_padding, padding=padding1)
self.activation = nn.SiLU() if activation_type == 'silu' else nn.ReLU()
self.dropout = nn.Dropout(dropout)
self.debug = debug
def forward(self, input_0):
primals_1 = self.deconv0.weight
primals_2 = self.deconv0.bias
primals_4 = self.deconv1.weight
primals_5 = self.deconv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| scaomath/galerkin-transformer | DeConv2dBlock | false | 16,370 | [
"MIT"
] | 106 | a9c2dc4427bfaba051d7e0154f110e460050c1df | https://github.com/scaomath/galerkin-transformer/tree/a9c2dc4427bfaba051d7e0154f110e460050c1df |
Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [3, 3], [1, 1], False, [0, 0], 4), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/up/cup7k6lckdxdeumc5wkgvvawwr2i7omafeixxhsjlwoqypnernlr.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_2 => add, clone, rsqrt, var_mean
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
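# Note: the kernel below computes, for each of the 64 (batch, h, w) positions, the mean over the 4 channels and rsqrt(var + 1e-06); the normalization itself is applied in the next kernel.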
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vl/cvl5znscfya2vyloz4woz3qs3yhrnlmrhqdjrm2ups76tnqugnij.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_2 => add, add_1, clone, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {})
triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y3), ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2 + (4*y3)), tmp8, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/z6/cz63vnafhvqvexb6jef6mkdhstxrrarf4q5x6w7vrrn5dk6hodcl.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# x_4 => add_2, erf, mul_2, mul_3, mul_4
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_3,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_2), kwargs = {})
triton_poi_fused_gelu_3 = async_compile.triton('triton_poi_fused_gelu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rb/crbbxy4r73vzsuemoueepohmvczbngc44c526mksdnqkvidq3k2g.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_8 => add_3
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %permute_3), kwargs = {})
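# Note: the kernel below fuses the layer-scale multiply (gamma * x), the permute from NHWC back to NCHW, and the residual add with the block input into a single pass.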
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 7, 7), (49, 49, 7, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_4, primals_5, buf4, 64, 4, grid=grid(64, 4), stream=stream0)
del buf2
del buf3
del primals_5
buf5 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
buf6 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.gelu]
triton_poi_fused_gelu_3.run(buf5, buf6, 1024, grid=grid(1024), stream=stream0)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf6, (64, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf7)
del primals_9
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.add]
triton_poi_fused_add_4.run(primals_1, primals_10, buf7, buf8, 16, 16, grid=grid(16, 16), stream=stream0)
return (buf8, primals_1, primals_2, primals_4, primals_10, buf1, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), buf5, reinterpret_tensor(buf6, (64, 16), (16, 1), 0), buf7, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 7, 7), (49, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LayerNorm(nn.Module):
"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
    def __init__(self, normalized_shape, eps=1e-06,
            data_format='channels_last'):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ['channels_last', 'channels_first']:
raise NotImplementedError
        self.normalized_shape = (normalized_shape,)  # 1-tuple, as F.layer_norm expects a shape tuple
def forward(self, x):
if self.data_format == 'channels_last':
            return F.layer_norm(x, self.normalized_shape, self.weight,
                self.bias, self.eps)
elif self.data_format == 'channels_first':
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
class Block(nn.Module):
"""ConvNeXt Block. There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
We use (2) as we find it slightly faster in PyTorch
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(self, dim, drop_path=0.0, layer_scale_init_value=1e-06):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
self.norm = LayerNorm(dim, eps=1e-06)
self.pwconv1 = nn.Linear(dim, 4 * dim)
self.act = nn.GELU()
self.pwconv2 = nn.Linear(4 * dim, dim)
self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim),
requires_grad=True) if layer_scale_init_value > 0 else None
        # DropPath is assumed to be provided by the surrounding codebase
        # (e.g. timm.models.layers.DropPath); with the default drop_path=0.0
        # it is never constructed.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
input = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2)
x = input + self.drop_path(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
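# Hedged usage sketch (an addition, not part of the original module): shows
# the eager-mode Block preserving shape via its residual connection.
def _demo_block_usage():
    # With the default drop_path=0.0 the externally provided DropPath helper
    # is never constructed, so this runs as-is on CPU.
    block = Block(dim=4)
    x = torch.rand(4, 4, 4, 4)
    y = block(x)
    assert y.shape == x.shape  # the residual connection preserves shape
    return y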
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y3, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
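# The kernel above evaluates the exact (erf-based) GELU that aten.gelu
# lowers to: 0.5 * x * (1 + erf(x / sqrt(2))), with the constant
# 0.7071067811865476 = 1/sqrt(2) baked in.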
@triton.jit
def triton_poi_fused_add_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
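# The kernel above fuses the per-channel layer-scale multiply and the
# residual add (out = input + gamma * x); its index arithmetic
# (y0 + 4 * x2 + 64 * y1) also folds in the permute from (N, H, W, C)
# back to (N, C, H, W).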
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 7, 7), (49, 49, 7, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(64, 4)](buf1, buf2, buf3,
primals_4, primals_5, buf4, 64, 4, XBLOCK=4, YBLOCK=32,
num_warps=4, num_stages=1)
del buf2
del buf3
del primals_5
buf5 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_7
buf6 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.
float32)
triton_poi_fused_gelu_3[grid(1024)](buf5, buf6, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf6, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf7)
del primals_9
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_4[grid(16, 16)](primals_1, primals_10, buf7,
buf8, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
return (buf8, primals_1, primals_2, primals_4, primals_10, buf1,
reinterpret_tensor(buf4, (64, 4), (4, 1), 0), buf5,
reinterpret_tensor(buf6, (64, 16), (16, 1), 0), buf7, primals_8,
primals_6)
class LayerNorm(nn.Module):
"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
    def __init__(self, normalized_shape, eps=1e-06,
            data_format='channels_last'):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ['channels_last', 'channels_first']:
raise NotImplementedError
        self.normalized_shape = (normalized_shape,)  # 1-tuple, as F.layer_norm expects a shape tuple
def forward(self, x):
if self.data_format == 'channels_last':
            return F.layer_norm(x, self.normalized_shape, self.weight,
                self.bias, self.eps)
elif self.data_format == 'channels_first':
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
class BlockNew(nn.Module):
"""ConvNeXt Block. There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
We use (2) as we find it slightly faster in PyTorch
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(self, dim, drop_path=0.0, layer_scale_init_value=1e-06):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
self.norm = LayerNorm(dim, eps=1e-06)
self.pwconv1 = nn.Linear(dim, 4 * dim)
self.act = nn.GELU()
self.pwconv2 = nn.Linear(4 * dim, dim)
self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim),
requires_grad=True) if layer_scale_init_value > 0 else None
        # DropPath is assumed to be provided by the surrounding codebase
        # (e.g. timm.models.layers.DropPath); with the default drop_path=0.0
        # it is never constructed.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, input_0):
primals_3 = self.gamma
primals_2 = self.dwconv.weight
primals_4 = self.dwconv.bias
primals_5 = self.norm.weight
primals_9 = self.norm.bias
primals_6 = self.pwconv1.weight
primals_7 = self.pwconv1.bias
primals_8 = self.pwconv2.weight
primals_10 = self.pwconv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
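# Hedged usage sketch (an addition, not part of the original record):
# BlockNew feeds its parameters through the compiled call() above, which
# allocates CUDA buffers and launches Triton kernels, so a CUDA device is
# assumed.
def _demo_blocknew_usage():
    block = BlockNew(dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return block(x)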
| sayakpaul/ConvNeXt-TF | Block | false | 16,371 | [
"Apache-2.0"
] | 68 | bf610810558b4248cd969aa7db42fadff1fdf57a | https://github.com/sayakpaul/ConvNeXt-TF/tree/bf610810558b4248cd969aa7db42fadff1fdf57a |
ResizeConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/oj/cojl5mb3pzv5jbmfzjkbac5hekbmpvb72kof6ouyyasitrogdd6n.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten._unsafe_index]
# Source node to ATen node mapping:
# h => _unsafe_index
# Graph fragment:
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {})
triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qm/cqm7mt4kwh3jd6llmoode3c6bmw47aaliqeicit6kfmkic7nsyrl.py
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# h_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_2, %primals_3, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 169) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 13, 13), (676, 169, 13, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 2704, grid=grid(2704), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn as nn
class ResizeConv2d(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride,
padding, dilation=1, scale_factor=2, activation=None):
super(ResizeConv2d, self).__init__()
self.activation = activation
self.upsamplingNN = nn.Upsample(scale_factor=scale_factor)
self.conv = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
def forward(self, x):
h = self.upsamplingNN(x)
h = self.conv(h)
if self.activation is None:
out = h
else:
out = self.activation(h)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'output_channels': 4, 'kernel_size':
4, 'stride': 1, 'padding': 4}]
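# Hedged shape check (an addition, not part of the original module), run in
# eager mode: nearest-neighbour upsampling doubles 4x4 to 8x8, and a conv
# with kernel_size=4, stride=1, padding=4 maps 8 -> (8 + 2*4 - 4) // 1 + 1 = 13.
def _demo_resizeconv_shape():
    m = ResizeConv2d(4, 4, kernel_size=4, stride=1, padding=4)
    y = m(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 13, 13)
    return y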
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp9, xmask)
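# The kernel above implements the 2x nearest-neighbour upsample: each output
# coordinate is scaled by 0.5 and truncated to an integer source index, so
# every input pixel is replicated into a 2x2 block.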
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 2704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 169 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 13, 13), (676, 169, 13, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(2704)](buf2, primals_3, 2704,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class ResizeConv2dNew(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride,
padding, dilation=1, scale_factor=2, activation=None):
super(ResizeConv2dNew, self).__init__()
self.activation = activation
self.upsamplingNN = nn.Upsample(scale_factor=scale_factor)
self.conv = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_3 = self.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| sanghiad/vae_vampprior | ResizeConv2d | false | 16,372 | [
"MIT"
] | 218 | d24bc0c8781b7ee7b9570c2d560e43bceff50da4 | https://github.com/sanghiad/vae_vampprior/tree/d24bc0c8781b7ee7b9570c2d560e43bceff50da4 |
MarginMSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/7w/c7wzkxjlezwpig65b2vlh6vgbopagjzn2sss6qfwgr2257fqgbtu.py
# Topologically Sorted Source Nodes: [sub, sub_1, sub_2, pow_1, loss], Original ATen: [aten.sub, aten.pow, aten.mean]
# Source node to ATen node mapping:
# loss => mean
# pow_1 => pow_1
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg2_1, %arg3_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %sub_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_mean_pow_sub_0 = async_compile.triton('triton_per_fused_mean_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp3 = tl.load(in_ptr2 + (r0), None)
tmp4 = tl.load(in_ptr3 + (r0), None)
tmp2 = tmp0 - tmp1
tmp5 = tmp3 - tmp4
tmp6 = tmp2 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, sub_1, sub_2, pow_1, loss], Original ATen: [aten.sub, aten.pow, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_pow_sub_0.run(buf1, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MarginMSELoss(nn.Module):
def __init__(self):
super(MarginMSELoss, self).__init__()
def forward(self, scores_pos, scores_neg, label_pos, label_neg):
"""
        A Margin-MSE loss: given two scores and two labels, it computes the
        MSE between the respective margins. All inputs should be tensors of
        equal size.
"""
loss = torch.mean(torch.pow(scores_pos - scores_neg - (label_pos -
label_neg), 2))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
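# Hedged sanity check (an addition, not part of the original module), under
# the assumption that Margin-MSE equals a plain MSE between the two margins.
def _demo_margin_mse_equivalence():
    sp, sn, lp, lneg = (torch.rand(4, 4, 4, 4) for _ in range(4))
    loss = MarginMSELoss()(sp, sn, lp, lneg)
    ref = nn.MSELoss()(sp - sn, lp - lneg)
    assert torch.allclose(loss, ref)
    return loss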
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp3 = tl.load(in_ptr2 + r0, None)
tmp4 = tl.load(in_ptr3 + r0, None)
tmp2 = tmp0 - tmp1
tmp5 = tmp3 - tmp4
tmp6 = tmp2 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
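# The kernel above is a single-program reduction: one block of 256 lanes
# loads all four tensors, forms (sp - sn) - (lp - lneg), squares it, sums
# across lanes, and divides by 256.0 to produce the scalar mean.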
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_pow_sub_0[grid(1)](buf1, arg0_1, arg1_1,
arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf1,
class MarginMSELossNew(nn.Module):
def __init__(self):
super(MarginMSELossNew, self).__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| sebastian-hofstaetter/neural-ranking-kd | MarginMSELoss | false | 16,373 | [
"Apache-2.0"
] | 51 | aafcc73d6b78ee9849c3d8f5ccf084051fcae2e9 | https://github.com/sebastian-hofstaetter/neural-ranking-kd/tree/aafcc73d6b78ee9849c3d8f5ccf084051fcae2e9 |
GMoF_unscaled | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/nw/cnwdsq3abo7fe6mxpotwfwzs6o5r6xx66xxpdvq3cncsk4wtutgy.py
# Topologically Sorted Source Nodes: [squared_res, add, dist], Original ATen: [aten.pow, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# dist => div
# squared_res => pow_1
# Graph fragment:
# %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_1, %add), kwargs = {})
triton_poi_fused_add_div_pow_0 = async_compile.triton('triton_poi_fused_add_div_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp1 / tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [squared_res, add, dist], Original ATen: [aten.pow, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GMoF_unscaled(nn.Module):
    """Geman-McClure robust penalty without the rho**2 rescaling: maps a
    residual r to r**2 / (r**2 + rho**2), which saturates below 1."""
    def __init__(self, rho=1):
super(GMoF_unscaled, self).__init__()
self.rho = rho
def extra_repr(self):
return 'rho = {}'.format(self.rho)
def forward(self, residual):
squared_res = residual ** 2
dist = torch.div(squared_res, squared_res + self.rho ** 2)
return dist
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
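# Hedged property check (an addition, not part of the original module): the
# Geman-McClure map r**2 / (r**2 + rho**2) is bounded in [0, 1) and is
# approximately r**2 in the small-residual (quadratic) regime.
def _demo_gmof_properties():
    gmof = GMoF_unscaled(rho=1)
    r = torch.linspace(-10.0, 10.0, steps=101)
    d = gmof(r)
    assert d.min() >= 0.0 and d.max() < 1.0
    return d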
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp1 / tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
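# The kernel above is the elementwise Geman-McClure map r**2 / (r**2 + rho**2)
# with rho = 1 folded in as the constant 1.0 (tmp2).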
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GMoF_unscaledNew(nn.Module):
    """Geman-McClure robust penalty without the rho**2 rescaling; the
    forward pass is routed through the Triton kernel in call()."""
    def __init__(self, rho=1):
super(GMoF_unscaledNew, self).__init__()
self.rho = rho
def extra_repr(self):
return 'rho = {}'.format(self.rho)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| sanweiliti/HMP | GMoF_unscaled | false | 16,374 | [
"MIT"
] | 92 | 3d1a96ec86a72396349daa9f8dde9b2e5a3fc578 | https://github.com/sanweiliti/HMP/tree/3d1a96ec86a72396349daa9f8dde9b2e5a3fc578 |
ChannelNorm2D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/k2/ck2ahiysbz5am2kdqgxfcalsms7yveiz5dvoxtgdx6inzxd62dti.py
# Topologically Sorted Source Nodes: [mu, var, sub, add, rsqrt, x_normed, mul_1, x_normed_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.rsqrt, aten.mul]
# Source node to ATen node mapping:
# add => add
# mu => mean
# mul_1 => mul_1
# rsqrt => rsqrt
# sub => sub
# var => var
# x_normed => mul
# x_normed_1 => add_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1]), kwargs = {correction: 1, keepdim: True})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 0.001), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %mul), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_add_mean_mul_rsqrt_sub_var_0 = async_compile.triton('triton_poi_fused_add_mean_mul_rsqrt_sub_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_rsqrt_sub_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_rsqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 4
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = 3.0
tmp24 = tmp22 / tmp23
tmp25 = 0.001
tmp26 = tmp24 + tmp25
tmp27 = libdevice.rsqrt(tmp26)
tmp28 = tmp11 * tmp27
tmp29 = tmp0 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x3), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu, var, sub, add, rsqrt, x_normed, mul_1, x_normed_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.rsqrt, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_mul_rsqrt_sub_var_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ChannelNorm2D(nn.Module):
"""
    Similar to torch.nn.InstanceNorm2d, but computes the normalization
    moments over the channel dimension instead of the spatial dimensions.
    Expects input in format (B, C, H, W).
"""
def __init__(self, input_channels, momentum=0.1, eps=0.001, affine=True,
**kwargs):
super(ChannelNorm2D, self).__init__()
self.momentum = momentum
self.eps = eps
self.affine = affine
if affine is True:
self.gamma = nn.Parameter(torch.ones(1, input_channels, 1, 1))
self.beta = nn.Parameter(torch.zeros(1, input_channels, 1, 1))
def forward(self, x):
"""
Calculate moments over channel dim, normalize.
x: Image tensor, shape (B,C,H,W)
"""
mu, var = torch.mean(x, dim=1, keepdim=True), torch.var(x, dim=1,
keepdim=True)
x_normed = (x - mu) * torch.rsqrt(var + self.eps)
if self.affine is True:
x_normed = self.gamma * x_normed + self.beta
return x_normed
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4}]
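# Hedged sanity check (an addition, not part of the original module): with
# gamma/beta at their init values (ones/zeros) the affine step is the
# identity, so the output should match manual channel-wise standardization
# with unbiased variance.
def _demo_channelnorm_check():
    norm = ChannelNorm2D(input_channels=4)
    x = torch.rand(4, 4, 4, 4)
    mu = x.mean(dim=1, keepdim=True)
    var = x.var(dim=1, keepdim=True)  # unbiased by default, matching forward()
    ref = (x - mu) * torch.rsqrt(var + norm.eps)
    assert torch.allclose(norm(x), ref, atol=1e-06)
    return ref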
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mean_mul_rsqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = 3.0
tmp24 = tmp22 / tmp23
tmp25 = 0.001
tmp26 = tmp24 + tmp25
tmp27 = libdevice.rsqrt(tmp26)
tmp28 = tmp11 * tmp27
tmp29 = tmp0 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x3, tmp31, xmask)
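# The kernel above recomputes, per spatial position, the mean over the four
# channels, the unbiased variance (sum of squared deviations divided by 3.0),
# rsqrt(var + 1e-3), and the affine gamma/beta transform in a single pass.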
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_mul_rsqrt_sub_var_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class ChannelNorm2DNew(nn.Module):
"""
    Similar to torch.nn.InstanceNorm2d, but computes the normalization
    moments over the channel dimension instead of the spatial dimensions.
    Expects input in format (B, C, H, W).
"""
def __init__(self, input_channels, momentum=0.1, eps=0.001, affine=True,
**kwargs):
super(ChannelNorm2DNew, self).__init__()
self.momentum = momentum
self.eps = eps
self.affine = affine
if affine is True:
self.gamma = nn.Parameter(torch.ones(1, input_channels, 1, 1))
self.beta = nn.Parameter(torch.zeros(1, input_channels, 1, 1))
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| sedrickkeh/high-fidelity-dual-image | ChannelNorm2D | false | 16,375 | [
"Apache-2.0"
] | 266 | 9cefd378467826b91596653df38666e469bb23e0 | https://github.com/sedrickkeh/high-fidelity-dual-image/tree/9cefd378467826b91596653df38666e469bb23e0 |
Cnn | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/oh/cohhzozgklcdr3g2cpdmnac2zvbvmk53smneafef4zekz5p2kieu.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
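# The kernel above fuses the conv bias add with ReLU: it adds the
# per-channel bias to the convolution output in place and clamps at zero
# via maximum(0, x).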
# kernel path: runs/run_shard_0/inductor_cache/5z/c5z4ugnz2tz6etzq5gfjssvgajfzqbzizudkvgjbmhjkyjb3kwgl.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/se/csedlwuxzhprkb3ecn6kgrmxlmezbyqcrjpioj4gtemlvmwagg6r.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 12
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4c/c4cz34smoctu7b33jzz7en4rchi4apm2gtczdgg6hlizghqknrib.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 12288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/c3/cc37wvituo2asffgdbn2cnuhsr4nuj5pzt75pvxxxx4t7tdtdkqj.py
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# relu_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ec/cecfzmb7lcmubnp7axcg34e5wnqbt6562ckr2vzbwja7yvxfo3xj.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/uy/cuyg775h5n6l7uvtibxdz6ehlnkmaz5fxgmjvhswthhapsgvtqpl.py
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_3 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_6(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2s/c2sw6qculg7ddjsrvcztrmpih5wsdfzftykdv4ritiictnovolze.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_5 => relu_4
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_7 = async_compile.triton('triton_poi_fused_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kx/ckxnhe4wnaqnkpf7dldzrgo4ne6c6ejtomnv6hgrwgx5bufq4xk3.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_6 => relu_5
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_13), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_8 = async_compile.triton('triton_poi_fused_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ds/cds7svddy7rwct3dmzyzl3nhnvmmlzugi2jujmhplve3tximlbdo.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_7 => relu_6
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_15), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_6, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_relu_threshold_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_9(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 3
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (8, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (12, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (16, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (100, 2048), (2048, 1))
assert_size_stride(primals_11, (100, ), (1, ))
assert_size_stride(primals_12, (10, 100), (100, 1))
assert_size_stride(primals_13, (10, ), (1, ))
assert_size_stride(primals_14, (3, 10), (10, 1))
assert_size_stride(primals_15, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
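        # The graph below alternates extern convolution kernels
        # (extern_kernels.convolution, backed by cuDNN) with the fused Triton
        # epilogues defined above; each conv output buffer is reused in place
        # for its bias-add + ReLU.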
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 131072, grid=grid(131072), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1), torch.float32)
buf3 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 12, 32, 32), (12288, 1024, 32, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 49152, grid=grid(49152), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 12, 16, 16), (3072, 256, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 12, 16, 16), (3072, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 12288, grid=grid(12288), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 16384, grid=grid(16384), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.float32)
buf11 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 4096, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 32, 8, 8), (2048, 64, 8, 1))
buf13 = buf12; del buf12 # reuse
buf21 = empty_strided_cuda((4, 32, 8, 8), (2048, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_6.run(buf13, primals_9, buf21, 8192, grid=grid(8192), stream=stream0)
del primals_9
buf14 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf13, (4, 2048), (2048, 1), 0), reinterpret_tensor(primals_10, (2048, 100), (1, 2048), 0), out=buf14)
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
triton_poi_fused_relu_7.run(buf15, primals_11, 400, grid=grid(400), stream=stream0)
del primals_11
buf16 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf15, reinterpret_tensor(primals_12, (100, 10), (1, 100), 0), out=buf16)
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu]
triton_poi_fused_relu_8.run(buf17, primals_13, 40, grid=grid(40), stream=stream0)
del primals_13
buf18 = empty_strided_cuda((4, 3), (3, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf17, reinterpret_tensor(primals_14, (10, 3), (1, 10), 0), out=buf18)
buf19 = buf18; del buf18 # reuse
buf20 = empty_strided_cuda((4, 3), (3, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_9.run(buf19, primals_15, buf20, 12, grid=grid(12), stream=stream0)
del primals_15
return (buf19, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, reinterpret_tensor(buf13, (4, 2048), (2048, 1), 0), buf15, buf17, buf20, primals_14, primals_12, primals_10, buf21, )
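# Note: benchmark_compiled_module below builds random inputs matching the
# shapes/strides asserted in call() and times the compiled graph end to end.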
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((100, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((10, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((3, 10), (10, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Cnn(nn.Module):
def __init__(self):
super(Cnn, self).__init__()
self.maxpool = nn.MaxPool2d(2)
self.conv1 = nn.Conv2d(3, 8, 3, padding=1)
self.conv2 = nn.Conv2d(8, 12, 3, padding=1)
self.conv3 = nn.Conv2d(12, 16, 3, padding=1)
self.conv4 = nn.Conv2d(16, 32, 3, padding=1)
self.fc1 = nn.Linear(2048, 100)
self.fc2 = nn.Linear(100, 10)
self.fc3 = nn.Linear(10, 3)
def forward(self, x):
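        # Shape flow for a 64x64 input: each maxpool halves H and W, so
        # (N, 3, 64, 64) -> (N, 8, 32, 32) -> (N, 12, 16, 16) -> (N, 16, 8, 8),
        # and conv4 yields (N, 32, 8, 8) = 2048 features per sample for fc1.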
x = self.maxpool(F.relu(self.conv1(x)))
x = self.maxpool(F.relu(self.conv2(x)))
x = self.maxpool(F.relu(self.conv3(x)))
x = F.relu(self.conv4(x))
x = x.view(-1, 2048)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
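# A minimal smoke test for the module above (an illustrative sketch, not part
# of the original source): with the (4, 3, 64, 64) input from get_inputs(),
# the flattened view is (4, 2048) and the final output shape is (4, 3).
#
#   model = Cnn()
#   out = model(*get_inputs())
#   assert out.shape == (4, 3)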
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
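# Fused epilogue for conv1: x1 indexes the 8 output channels of the 64x64
# feature maps (4096 = 64 * 64 elements per map); the kernel adds the
# per-channel bias and applies ReLU in place on the extern conv's output.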
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 8
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
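# 2x2, stride-2 max pooling over the 64x64 maps: out_ptr0 receives the max of
# each window and out_ptr1 an int8 index (0..3) of the winning element, which
# autograd later uses to route gradients in the backward pass.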
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
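# Kernels 2-5 repeat the same fused bias+ReLU and maxpool-with-indices
# patterns at each halved resolution (32x32 -> 16x16 -> 8x8).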
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 12
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
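# Same bias+ReLU epilogue for conv4, fused with the "threshold_backward"
# prologue: out_ptr0 stores a boolean mask (activation <= 0) that the ReLU
# backward pass uses to zero gradients.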
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_6(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, None)
tl.store(out_ptr0 + x3, tmp6, None)
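# Epilogues for the fully connected layers: the matmuls run via
# extern_kernels.mm (cuBLAS) and these kernels add the bias and apply ReLU.
# The element counts (400, 40, 12) need not fill a block, so xmask guards the
# tail loads and stores.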
@triton.jit
def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_9(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 3
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
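# call() wires the whole forward pass: convolutions and matmuls are
# dispatched to extern kernels, the fused Triton epilogues above run in
# between, and the return tuple carries the output (buf19) plus the tensors
# saved for the backward pass.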
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (8, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (12, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (16, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (100, 2048), (2048, 1))
assert_size_stride(primals_11, (100,), (1,))
assert_size_stride(primals_12, (10, 100), (100, 1))
assert_size_stride(primals_13, (10,), (1,))
assert_size_stride(primals_14, (3, 10), (10, 1))
assert_size_stride(primals_15, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(131072)](buf1, primals_2,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(32768)](buf1, buf2,
buf3, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 12, 32, 32), (12288, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(49152)](buf5, primals_5,
49152, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 12, 16, 16), (3072, 256, 16, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 12, 16, 16), (3072, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(12288)](buf5, buf6,
buf7, 12288, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_7,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.
float32)
buf11 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(4096)](buf9, buf10,
buf11, 4096, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 32, 8, 8), (2048, 64, 8, 1))
buf13 = buf12
del buf12
buf21 = empty_strided_cuda((4, 32, 8, 8), (2048, 64, 8, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(8192)](
buf13, primals_9, buf21, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_9
buf14 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf13, (4, 2048), (2048, 1), 0
), reinterpret_tensor(primals_10, (2048, 100), (1, 2048), 0),
out=buf14)
buf15 = buf14
del buf14
triton_poi_fused_relu_7[grid(400)](buf15, primals_11, 400, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_11
buf16 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.mm(buf15, reinterpret_tensor(primals_12, (100, 10),
(1, 100), 0), out=buf16)
buf17 = buf16
del buf16
triton_poi_fused_relu_8[grid(40)](buf17, primals_13, 40, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_13
buf18 = empty_strided_cuda((4, 3), (3, 1), torch.float32)
extern_kernels.mm(buf17, reinterpret_tensor(primals_14, (10, 3), (1,
10), 0), out=buf18)
buf19 = buf18
del buf18
buf20 = empty_strided_cuda((4, 3), (3, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_9[grid(12)](buf19,
primals_15, buf20, 12, XBLOCK=16, num_warps=1, num_stages=1)
del primals_15
return (buf19, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11,
reinterpret_tensor(buf13, (4, 2048), (2048, 1), 0), buf15, buf17,
buf20, primals_14, primals_12, primals_10, buf21)
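# CnnNew mirrors Cnn's parameters but replaces the eager forward with the
# compiled call(); output[0] is the network output, the remaining entries
# exist solely for autograd.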
class CnnNew(nn.Module):
def __init__(self):
super(CnnNew, self).__init__()
self.maxpool = nn.MaxPool2d(2)
self.conv1 = nn.Conv2d(3, 8, 3, padding=1)
self.conv2 = nn.Conv2d(8, 12, 3, padding=1)
self.conv3 = nn.Conv2d(12, 16, 3, padding=1)
self.conv4 = nn.Conv2d(16, 32, 3, padding=1)
self.fc1 = nn.Linear(2048, 100)
self.fc2 = nn.Linear(100, 10)
self.fc3 = nn.Linear(10, 3)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.fc1.weight
primals_11 = self.fc1.bias
primals_12 = self.fc2.weight
primals_13 = self.fc2.bias
primals_14 = self.fc3.weight
primals_15 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
| satinder147/DeepWay.v2 | Cnn | false | 16,376 | [
"BSD-2-Clause"
] | 57 | c8fca77783ea39f3d17066600d89baf8d0d19a52 | https://github.com/satinder147/DeepWay.v2/tree/c8fca77783ea39f3d17066600d89baf8d0d19a52 |
ConvNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ra/crac4pwlmyctghcm4ewambrd5ocipynb73tkugxbnkqavf5y4xsk.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2, 2], [2, 2, 2], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 32768) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jn/cjnnrz6o7wjzdnedat4ufm63jrcyri6sr64uvbtbz4mtgv55tz2h.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vd/cvdb2pqvjvzobqaw4di5vynhhmn7qfo4zlm5dqfxpafrkpcmtc75.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_4 => convolution_2
# x_5 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 512) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
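# Each strided (2, 2, 2) 3D convolution above halves D, H and W while the
# channel count doubles (16 -> 32 -> 64), so the fused-epilogue element count
# drops 4x per stage: 2,097,152 -> 524,288 -> 131,072.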
# kernel path: runs/run_shard_0/inductor_cache/3s/c3s6dmarsodkcv3xpuv4tcxco6fx4hzevhmhxf6z7wpvxzp2beex.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_8 => add, add_1, convert_element_type, convert_element_type_1, iota, mul, mul_1
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (10,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.8), kwargs = {})
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_3 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_3(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 10
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.8
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
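# This kernel only materializes the nearest-neighbor source-index table used
# by F.interpolate(x, 10) on an input of extent 8: idx[i] = int(i * 0.8).
# The same table computed on the host (a sketch; the 0.8 scale is read off
# the graph fragment above):
#     torch.arange(10, dtype=torch.float32).mul(0.8).to(torch.int64)
#     # -> tensor([0, 0, 1, 2, 3, 4, 4, 5, 6, 7])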
# kernel path: runs/run_shard_0/inductor_cache/zd/czdla3bssm4ppmd2ry2xwsijcoucbuz4xtgxugmpdrh25wi7idoj.py
# Topologically Sorted Source Nodes: [x_6, x_7, x_8], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index]
# Source node to ATen node mapping:
# x_6 => convolution_3
# x_7 => relu_3
# x_8 => _unsafe_index
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_3, [None, None, %unsqueeze_1, %unsqueeze, %convert_element_type_1]), kwargs = {})
triton_poi_fused__unsafe_index_convolution_relu_4 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_relu_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 100) % 10
x1 = (xindex // 10) % 10
x0 = xindex % 10
x6 = (xindex // 1000)
x3 = (xindex // 1000) % 64
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x2), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (x3), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp10 = tmp9 + tmp1
tmp11 = tmp9 < 0
tmp12 = tl.where(tmp11, tmp10, tmp9)
tmp13 = tl.load(in_ptr1 + (tmp12 + (8*tmp8) + (64*tmp4) + (512*x6)), None, eviction_policy='evict_last')
tmp15 = tmp13 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + (x7), tmp17, None)
''', device_str='cuda')
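# This kernel fuses three ops: add the conv bias, apply ReLU, and gather the
# result along D, H and W through the index table built above (nearest-neighbor
# upsample 8^3 -> 10^3). A hedged eager-mode sketch of the fusion
# (hypothetical helper name, illustration only):
def _upsample_bias_relu_reference(conv_out, bias, idx):
    # conv_out: (N, C, 8, 8, 8); bias: (C,); idx: (10,) int64 table
    y = torch.relu(conv_out + bias.view(1, -1, 1, 1, 1))
    return y[:, :, idx][:, :, :, idx][:, :, :, :, idx]  # (N, C, 10, 10, 10)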
# kernel path: runs/run_shard_0/inductor_cache/sw/cswfp3azebghgh53gjwvnbssfuznqfspischguu6lu3v7atlc4nv.py
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_11 => add_6, add_7, convert_element_type_6, convert_element_type_7, iota_3, mul_6, mul_7
# Graph fragment:
# %iota_3 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (20,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_3, 1), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, 0), kwargs = {})
# %convert_element_type_6 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_6, torch.float32), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_6, 0.0), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, 0.5), kwargs = {})
# %convert_element_type_7 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_7, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_5 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_5(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hx/chxxo3cghpi54n767oewdamufk2s6wpv3jh7snzelq7obw5l53kg.py
# Topologically Sorted Source Nodes: [x_9, x_10, x_11], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index]
# Source node to ATen node mapping:
# x_10 => relu_4
# x_11 => _unsafe_index_1
# x_9 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_10, %primals_11, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_4, [None, None, %unsqueeze_4, %unsqueeze_3, %convert_element_type_7]), kwargs = {})
triton_poi_fused__unsafe_index_convolution_relu_6 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_relu_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 400) % 20
x1 = (xindex // 20) % 20
x0 = xindex % 20
x6 = (xindex // 8000)
x3 = (xindex // 8000) % 32
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x2), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (x3), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 10, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp10 = tmp9 + tmp1
tmp11 = tmp9 < 0
tmp12 = tl.where(tmp11, tmp10, tmp9)
tmp13 = tl.load(in_ptr1 + (tmp12 + (10*tmp8) + (100*tmp4) + (1000*x6)), None, eviction_policy='evict_last')
tmp15 = tmp13 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + (x7), tmp17, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/r4/cr4z2vzhu4vchn2hh46fwgg6zruj4o443kjmojgydps62udnaqgt.py
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_14 => add_12, add_13, convert_element_type_12, convert_element_type_13, iota_6, mul_12, mul_13
# Graph fragment:
# %iota_6 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (40,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_6, 1), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_12, 0), kwargs = {})
# %convert_element_type_12 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_12, torch.float32), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_12, 0.0), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_13, 0.5), kwargs = {})
# %convert_element_type_13 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_13, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_7 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/uc/cucsjeqyo7srusbz2guvnq5xc3boibj4cqgj5djhdhgysyh6blmt.py
# Topologically Sorted Source Nodes: [x_12, x_13, x_14], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index]
# Source node to ATen node mapping:
# x_12 => convolution_5
# x_13 => relu_5
# x_14 => _unsafe_index_2
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_12, %primals_13, [1, 1, 1], [2, 2, 2], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %_unsafe_index_2 : [num_users=4] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_5, [None, None, %unsqueeze_7, %unsqueeze_6, %convert_element_type_13]), kwargs = {})
triton_poi_fused__unsafe_index_convolution_relu_8 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_relu_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 1600) % 40
x1 = (xindex // 40) % 40
x0 = xindex % 40
x6 = (xindex // 64000)
x3 = (xindex // 64000) % 16
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x2), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (x3), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 20, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp10 = tmp9 + tmp1
tmp11 = tmp9 < 0
tmp12 = tl.where(tmp11, tmp10, tmp9)
tmp13 = tl.load(in_ptr1 + (tmp12 + (20*tmp8) + (400*tmp4) + (8000*x6)), None, eviction_policy='evict_last')
tmp15 = tmp13 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + (x7), tmp17, None)
''', device_str='cuda')
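# The two kernels above and the two before them repeat the index-table plus
# fused-gather pattern for the remaining decoder stages (10^3 -> 20^3 and
# 20^3 -> 40^3, both with scale 0.5). In eager PyTorch each stage is simply
#     x = F.relu(conv(x)); x = F.interpolate(x, size)   # nearest by default
# as in the Decoder module of the reference implementation further down.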
# kernel path: runs/run_shard_0/inductor_cache/x6/cx652tuogq2gk4cwkkzwedh5h56tjkb22jq6dxjr2z2xzfan2aup.py
# Topologically Sorted Source Nodes: [conv3d_6, qual_out], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv3d_6 => convolution_6
# qual_out => sigmoid
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_2, %primals_14, %primals_15, [1, 1, 1], [2, 2, 2], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_6,), kwargs = {})
triton_poi_fused_convolution_sigmoid_9 = async_compile.triton('triton_poi_fused_convolution_sigmoid_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, None)
''', device_str='cuda')
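# The quality head has a single output channel, so its bias is one scalar
# loaded once (tl.load(in_ptr0 + 0)) and broadcast over the block; the
# epilogue is sigmoid(conv + b). Sketch (hypothetical helper, illustration):
def _qual_head_reference(conv_out, bias):
    # conv_out: (N, 1, 40, 40, 40); bias: (1,)
    return torch.sigmoid(conv_out + bias)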
# kernel path: runs/run_shard_0/inductor_cache/jv/cjvetez4z6cireprc6rd6yh2vdznng7s7zgshvahb5vcnuku7tew.py
# Topologically Sorted Source Nodes: [conv3d_7], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv3d_7 => convolution_7
# Graph fragment:
# %convolution_7 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_2, %primals_16, %primals_17, [1, 1, 1], [2, 2, 2], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64000) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vl/cvl7dyani3kht747br6sxbgmejlia3rk3soiq2db6qqrkww6rfs5.py
# Topologically Sorted Source Nodes: [rot_out], Original ATen: [aten.div]
# Source node to ATen node mapping:
# rot_out => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%convolution_7, %expand), kwargs = {})
triton_poi_fused_div_11 = async_compile.triton('triton_poi_fused_div_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 64000
x2 = (xindex // 256000)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x0 + (256000*x2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64000 + x0 + (256000*x2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (128000 + x0 + (256000*x2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (192000 + x0 + (256000*x2)), None, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
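# triton_poi_fused_div_11 is F.normalize(rot, dim=1) unrolled for 4 channels:
# at every voxel it reloads the four channel values, forms the L2 norm,
# clamps it at eps=1e-12 and divides. Equivalent eager code (a sketch, not
# the compiled path):
def _rot_normalize_reference(rot):
    # rot: (N, 4, D, H, W); same result as F.normalize(rot, dim=1)
    norm = rot.pow(2).sum(dim=1, keepdim=True).sqrt().clamp_min(1e-12)
    return rot / norm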
# kernel path: runs/run_shard_0/inductor_cache/zm/czmcfy6sbb3qcyk2mmt3h7tjzkgupjsmg3wbrf2jv5kx6lbt7wfx.py
# Topologically Sorted Source Nodes: [width_out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# width_out => convolution_8
# Graph fragment:
# %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_2, %primals_18, %primals_19, [1, 1, 1], [2, 2, 2], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_12 = async_compile.triton('triton_poi_fused_convolution_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qa/cqao2iv4w2wixgu2o42nbp6ne4lvvfxd5vm74qycfroavtbv4wrk.py
# Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_12 => convolution_5
# x_13 => relu_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_12, %primals_13, [1, 1, 1], [2, 2, 2], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_13 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 8000) % 16
x0 = xindex % 8000
x4 = (xindex // 8000)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + (8064*x4)), tmp6, None)
''', device_str='cuda')
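# This kernel and the two below it do not feed the forward outputs: they
# precompute the boolean masks (relu(conv + bias) <= 0) that ReLU's backward
# needs, so the float activations themselves need not be saved. What gets
# stored, as a sketch:
def _relu_backward_mask_reference(conv_out, bias):
    return torch.relu(conv_out + bias.view(1, -1, 1, 1, 1)) <= 0  # torch.bool
# Note the padded store stride here (8064 elements per channel instead of
# 8000); inductor appears to pad the allocation for alignment, an
# implementation detail that may vary across versions.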
# kernel path: runs/run_shard_0/inductor_cache/jr/cjrtd5bpfe3olttbxch2upzstlithlunidfqh4sdjwgvtjoatxlg.py
# Topologically Sorted Source Nodes: [x_9, x_10], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_10 => relu_4
# x_9 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_10, %primals_11, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_14 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 1000) % 32
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/z7/cz7jqhxg3cfgb3yrls57nyewlnoa6llc25b3loswiwb2f3y6zqtd.py
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_6 => convolution_3
# x_7 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_15 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 512) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args
args.clear()
assert_size_stride(primals_1, (16, 1, 5, 5, 5), (125, 125, 25, 5, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1))
assert_size_stride(primals_4, (32, 16, 3, 3, 3), (432, 27, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (64, 32, 3, 3, 3), (864, 27, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 64, 3, 3, 3), (1728, 27, 9, 3, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (32, 64, 3, 3, 3), (1728, 27, 9, 3, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (16, 32, 5, 5, 5), (4000, 125, 25, 5, 1))
assert_size_stride(primals_13, (16, ), (1, ))
assert_size_stride(primals_14, (1, 16, 5, 5, 5), (2000, 125, 25, 5, 1))
assert_size_stride(primals_15, (1, ), (1, ))
assert_size_stride(primals_16, (4, 16, 5, 5, 5), (2000, 125, 25, 5, 1))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (1, 16, 5, 5, 5), (2000, 125, 25, 5, 1))
assert_size_stride(primals_19, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2, 2), padding=(2, 2, 2), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 32, 32, 32), (524288, 32768, 1024, 32, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 2097152, grid=grid(2097152), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 16, 16, 16), (131072, 4096, 256, 16, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 524288, grid=grid(524288), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 8, 8, 8), (32768, 512, 64, 8, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 131072, grid=grid(131072), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 8, 8, 8), (32768, 512, 64, 8, 1))
buf7 = empty_strided_cuda((10, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_3.run(buf7, 10, grid=grid(10), stream=stream0)
buf8 = empty_strided_cuda((4, 64, 10, 10, 10), (64000, 1000, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6, x_7, x_8], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index]
triton_poi_fused__unsafe_index_convolution_relu_4.run(buf7, buf6, primals_9, buf8, 256000, grid=grid(256000), stream=stream0)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_10, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 10, 10, 10), (32000, 1000, 100, 10, 1))
buf10 = empty_strided_cuda((20, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_5.run(buf10, 20, grid=grid(20), stream=stream0)
buf11 = empty_strided_cuda((4, 32, 20, 20, 20), (256000, 8000, 400, 20, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_9, x_10, x_11], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index]
triton_poi_fused__unsafe_index_convolution_relu_6.run(buf10, buf9, primals_11, buf11, 1024000, grid=grid(1024000), stream=stream0)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_12, stride=(1, 1, 1), padding=(2, 2, 2), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 16, 20, 20, 20), (128000, 8000, 400, 20, 1))
buf13 = empty_strided_cuda((40, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_7.run(buf13, 40, grid=grid(40), stream=stream0)
buf14 = empty_strided_cuda((4, 16, 40, 40, 40), (1024000, 64000, 1600, 40, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_12, x_13, x_14], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index]
triton_poi_fused__unsafe_index_convolution_relu_8.run(buf13, buf12, primals_13, buf14, 4096000, grid=grid(4096000), stream=stream0)
# Topologically Sorted Source Nodes: [conv3d_6], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1, 1), padding=(2, 2, 2), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 1, 40, 40, 40), (64000, 64000, 1600, 40, 1))
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [conv3d_6, qual_out], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_9.run(buf16, primals_15, 256000, grid=grid(256000), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [conv3d_7], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf14, primals_16, stride=(1, 1, 1), padding=(2, 2, 2), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 4, 40, 40, 40), (256000, 64000, 1600, 40, 1))
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [conv3d_7], Original ATen: [aten.convolution]
triton_poi_fused_convolution_10.run(buf18, primals_17, 1024000, grid=grid(1024000), stream=stream0)
del primals_17
buf19 = empty_strided_cuda((4, 4, 40, 40, 40), (256000, 64000, 1600, 40, 1), torch.float32)
# Topologically Sorted Source Nodes: [rot_out], Original ATen: [aten.div]
triton_poi_fused_div_11.run(buf18, buf19, 1024000, grid=grid(1024000), stream=stream0)
# Topologically Sorted Source Nodes: [width_out], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf14, primals_18, stride=(1, 1, 1), padding=(2, 2, 2), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 1, 40, 40, 40), (64000, 64000, 1600, 40, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [width_out], Original ATen: [aten.convolution]
triton_poi_fused_convolution_12.run(buf21, primals_19, 256000, grid=grid(256000), stream=stream0)
del primals_19
buf22 = empty_strided_cuda((4, 16, 20, 20, 20), (129024, 8064, 400, 20, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_13.run(buf12, primals_13, buf22, 512000, grid=grid(512000), stream=stream0)
del buf12
del primals_13
buf23 = empty_strided_cuda((4, 32, 10, 10, 10), (32000, 1000, 100, 10, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_9, x_10], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_14.run(buf9, primals_11, buf23, 128000, grid=grid(128000), stream=stream0)
del buf9
del primals_11
buf24 = empty_strided_cuda((4, 64, 8, 8, 8), (32768, 512, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_15.run(buf6, primals_9, buf24, 131072, grid=grid(131072), stream=stream0)
del buf6
del primals_9
return (buf16, buf19, buf21, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, buf1, buf3, buf5, buf7, buf8, buf10, buf11, buf13, buf14, buf16, buf18, buf22, buf23, buf24, )
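# call() returns 27 tensors: the first three are the module outputs
# (qual_out, rot_out, width_out); the rest are weights, activations, index
# tables and ReLU masks kept alive for the backward pass. A hedged usage
# sketch:
#     qual, rot, width, *saved_for_backward = call(list_of_19_primals)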
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 1, 5, 5, 5), (125, 125, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 16, 3, 3, 3), (432, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 32, 3, 3, 3), (864, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 64, 3, 3, 3), (1728, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 64, 3, 3, 3), (1728, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((16, 32, 5, 5, 5), (4000, 125, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1, 16, 5, 5, 5), (2000, 125, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 16, 5, 5, 5), (2000, 125, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((1, 16, 5, 5, 5), (2000, 125, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.utils.data
import torch.nn as nn
def conv(in_channels, out_channels, kernel_size):
    return nn.Conv3d(in_channels, out_channels, kernel_size,
                     padding=kernel_size // 2)
def conv_stride(in_channels, out_channels, kernel_size):
    return nn.Conv3d(in_channels, out_channels, kernel_size, stride=2,
                     padding=kernel_size // 2)
class Decoder(nn.Module):
def __init__(self, in_channels, filters, kernels):
super().__init__()
self.conv1 = conv(in_channels, filters[0], kernels[0])
self.conv2 = conv(filters[0], filters[1], kernels[1])
self.conv3 = conv(filters[1], filters[2], kernels[2])
    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = F.interpolate(x, 10)  # nearest-neighbor upsample to 10^3
        x = self.conv2(x)
        x = F.relu(x)
        x = F.interpolate(x, 20)  # -> 20^3
        x = self.conv3(x)
        x = F.relu(x)
        x = F.interpolate(x, 40)  # -> 40^3
        return x
class Encoder(nn.Module):
def __init__(self, in_channels, filters, kernels):
super().__init__()
self.conv1 = conv_stride(in_channels, filters[0], kernels[0])
self.conv2 = conv_stride(filters[0], filters[1], kernels[1])
self.conv3 = conv_stride(filters[1], filters[2], kernels[2])
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = self.conv3(x)
x = F.relu(x)
return x
class ConvNet(nn.Module):
def __init__(self):
super().__init__()
self.encoder = Encoder(1, [16, 32, 64], [5, 3, 3])
self.decoder = Decoder(64, [64, 32, 16], [3, 3, 5])
self.conv_qual = conv(16, 1, 5)
self.conv_rot = conv(16, 4, 5)
self.conv_width = conv(16, 1, 5)
    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        qual_out = torch.sigmoid(self.conv_qual(x))  # per-voxel score in (0, 1)
        rot_out = F.normalize(self.conv_rot(x), dim=1)  # unit 4-vector per voxel
        width_out = self.conv_width(x)
        return qual_out, rot_out, width_out
def get_inputs():
return [torch.rand([4, 1, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
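# A minimal smoke test for the reference module above (a sketch with a
# hypothetical helper name; runs in eager mode on CPU):
def _convnet_smoke_test():
    net = ConvNet()
    x, = get_inputs()
    qual, rot, width = net(x)
    assert qual.shape == (4, 1, 40, 40, 40)
    assert rot.shape == (4, 4, 40, 40, 40)
    assert width.shape == (4, 1, 40, 40, 40)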
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 32768 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 512 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_3(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 10
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.8
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_relu_4(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 100 % 10
x1 = xindex // 10 % 10
x0 = xindex % 10
x6 = xindex // 1000
x3 = xindex // 1000 % 64
x7 = xindex
tmp0 = tl.load(in_ptr0 + x2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + x3, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp10 = tmp9 + tmp1
tmp11 = tmp9 < 0
tmp12 = tl.where(tmp11, tmp10, tmp9)
tmp13 = tl.load(in_ptr1 + (tmp12 + 8 * tmp8 + 64 * tmp4 + 512 * x6),
None, eviction_policy='evict_last')
tmp15 = tmp13 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + x7, tmp17, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_5(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_relu_6(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 400 % 20
x1 = xindex // 20 % 20
x0 = xindex % 20
x6 = xindex // 8000
x3 = xindex // 8000 % 32
x7 = xindex
tmp0 = tl.load(in_ptr0 + x2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + x3, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 10, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp10 = tmp9 + tmp1
tmp11 = tmp9 < 0
tmp12 = tl.where(tmp11, tmp10, tmp9)
tmp13 = tl.load(in_ptr1 + (tmp12 + 10 * tmp8 + 100 * tmp4 + 1000 * x6),
None, eviction_policy='evict_last')
tmp15 = tmp13 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + x7, tmp17, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_relu_8(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 1600 % 40
x1 = xindex // 40 % 40
x0 = xindex % 40
x6 = xindex // 64000
x3 = xindex // 64000 % 16
x7 = xindex
tmp0 = tl.load(in_ptr0 + x2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + x3, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 20, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp10 = tmp9 + tmp1
tmp11 = tmp9 < 0
tmp12 = tl.where(tmp11, tmp10, tmp9)
tmp13 = tl.load(in_ptr1 + (tmp12 + 20 * tmp8 + 400 * tmp4 + 8000 * x6),
None, eviction_policy='evict_last')
tmp15 = tmp13 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + x7, tmp17, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64000 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_div_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 64000
x2 = xindex // 256000
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + (x0 + 256000 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (64000 + x0 + 256000 * x2), None,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (128000 + x0 + 256000 * x2), None,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (192000 + x0 + 256000 * x2), None,
eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_convolution_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_13(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 8000 % 16
x0 = xindex % 8000
x4 = xindex // 8000
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + 8064 * x4), tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 1000 % 32
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 512 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
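# Added commentary: the three *_threshold_backward kernels above do not change
# the forward outputs. Each one recomputes conv-output + bias, applies ReLU,
# and stores a boolean mask (activation <= 0) that autograd later uses to zero
# gradients where the ReLU was inactive. A rough eager-mode equivalent,
# assuming `pre_act` is the biased convolution output:
#     relu_mask = torch.relu(pre_act) <= 0   # True -> gradient dropped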
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (16, 1, 5, 5, 5), (125, 125, 25, 5, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096,
64, 1))
assert_size_stride(primals_4, (32, 16, 3, 3, 3), (432, 27, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 3, 3, 3), (864, 27, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 3, 3, 3), (1728, 27, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (32, 64, 3, 3, 3), (1728, 27, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (16, 32, 5, 5, 5), (4000, 125, 25, 5, 1))
assert_size_stride(primals_13, (16,), (1,))
assert_size_stride(primals_14, (1, 16, 5, 5, 5), (2000, 125, 25, 5, 1))
assert_size_stride(primals_15, (1,), (1,))
assert_size_stride(primals_16, (4, 16, 5, 5, 5), (2000, 125, 25, 5, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (1, 16, 5, 5, 5), (2000, 125, 25, 5, 1))
assert_size_stride(primals_19, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2, 2), padding=(2, 2, 2), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 32, 32, 32), (524288, 32768, 1024,
32, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(2097152)](buf1, primals_2,
2097152, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2, 2),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 16, 16, 16), (131072, 4096, 256,
16, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(524288)](buf3, primals_5,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2, 2),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 8, 8, 8), (32768, 512, 64, 8, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(131072)](buf5, primals_7,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 8, 8, 8), (32768, 512, 64, 8, 1))
buf7 = empty_strided_cuda((10,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_3[grid(10)](buf7, 10,
XBLOCK=16, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 64, 10, 10, 10), (64000, 1000, 100,
10, 1), torch.float32)
triton_poi_fused__unsafe_index_convolution_relu_4[grid(256000)](buf7,
buf6, primals_9, buf8, 256000, XBLOCK=512, num_warps=8,
num_stages=1)
buf9 = extern_kernels.convolution(buf8, primals_10, stride=(1, 1, 1
), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 10, 10, 10), (32000, 1000, 100, 10, 1)
)
buf10 = empty_strided_cuda((20,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_5[grid(20)](buf10, 20,
XBLOCK=32, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 32, 20, 20, 20), (256000, 8000, 400,
20, 1), torch.float32)
triton_poi_fused__unsafe_index_convolution_relu_6[grid(1024000)](buf10,
buf9, primals_11, buf11, 1024000, XBLOCK=1024, num_warps=4,
num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_12, stride=(1, 1,
1), padding=(2, 2, 2), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 16, 20, 20, 20), (128000, 8000, 400,
20, 1))
buf13 = empty_strided_cuda((40,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_7[grid(40)](buf13, 40,
XBLOCK=64, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 16, 40, 40, 40), (1024000, 64000,
1600, 40, 1), torch.float32)
triton_poi_fused__unsafe_index_convolution_relu_8[grid(4096000)](buf13,
buf12, primals_13, buf14, 4096000, XBLOCK=1024, num_warps=4,
num_stages=1)
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1,
1), padding=(2, 2, 2), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 1, 40, 40, 40), (64000, 64000, 1600,
40, 1))
buf16 = buf15
del buf15
triton_poi_fused_convolution_sigmoid_9[grid(256000)](buf16,
primals_15, 256000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf17 = extern_kernels.convolution(buf14, primals_16, stride=(1, 1,
1), padding=(2, 2, 2), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 4, 40, 40, 40), (256000, 64000, 1600,
40, 1))
buf18 = buf17
del buf17
triton_poi_fused_convolution_10[grid(1024000)](buf18, primals_17,
1024000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf19 = empty_strided_cuda((4, 4, 40, 40, 40), (256000, 64000, 1600,
40, 1), torch.float32)
triton_poi_fused_div_11[grid(1024000)](buf18, buf19, 1024000,
XBLOCK=512, num_warps=8, num_stages=1)
buf20 = extern_kernels.convolution(buf14, primals_18, stride=(1, 1,
1), padding=(2, 2, 2), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 1, 40, 40, 40), (64000, 64000, 1600,
40, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_12[grid(256000)](buf21, primals_19,
256000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_19
buf22 = empty_strided_cuda((4, 16, 20, 20, 20), (129024, 8064, 400,
20, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_13[grid(512000)](
buf12, primals_13, buf22, 512000, XBLOCK=512, num_warps=8,
num_stages=1)
del buf12
del primals_13
buf23 = empty_strided_cuda((4, 32, 10, 10, 10), (32000, 1000, 100,
10, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_14[grid(128000)](
buf9, primals_11, buf23, 128000, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf9
del primals_11
buf24 = empty_strided_cuda((4, 64, 8, 8, 8), (32768, 512, 64, 8, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(131072)](
buf6, primals_9, buf24, 131072, XBLOCK=512, num_warps=8,
num_stages=1)
del buf6
del primals_9
return (buf16, buf19, buf21, primals_1, primals_3, primals_4, primals_6,
primals_8, primals_10, primals_12, primals_14, primals_16,
primals_18, buf1, buf3, buf5, buf7, buf8, buf10, buf11, buf13,
buf14, buf16, buf18, buf22, buf23, buf24)
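# Added commentary on call()'s outputs: buf16 is the sigmoid-activated quality
# head, buf19 is the rotation head normalized to unit L2 norm across its four
# channels (sqrt-of-sum-of-squares clamped at 1e-12, as in F.normalize), and
# buf21 is the raw width head; the remaining tensors are retained for the
# backward pass.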
def conv(in_channels, out_channels, kernel_size):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
kernel_size // 2)
def conv_stride(in_channels, out_channels, kernel_size):
return nn.Conv3d(in_channels, out_channels, kernel_size, stride=2,
padding=kernel_size // 2)
class Decoder(nn.Module):
def __init__(self, in_channels, filters, kernels):
super().__init__()
self.conv1 = conv(in_channels, filters[0], kernels[0])
self.conv2 = conv(filters[0], filters[1], kernels[1])
self.conv3 = conv(filters[1], filters[2], kernels[2])
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.interpolate(x, 10)
x = self.conv2(x)
x = F.relu(x)
x = F.interpolate(x, 20)
x = self.conv3(x)
x = F.relu(x)
x = F.interpolate(x, 40)
return x
class Encoder(nn.Module):
def __init__(self, in_channels, filters, kernels):
super().__init__()
self.conv1 = conv_stride(in_channels, filters[0], kernels[0])
self.conv2 = conv_stride(filters[0], filters[1], kernels[1])
self.conv3 = conv_stride(filters[1], filters[2], kernels[2])
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = self.conv3(x)
x = F.relu(x)
return x
class ConvNetNew(nn.Module):
def __init__(self):
super().__init__()
self.encoder = Encoder(1, [16, 32, 64], [5, 3, 3])
self.decoder = Decoder(64, [64, 32, 16], [3, 3, 5])
self.conv_qual = conv(16, 1, 5)
self.conv_rot = conv(16, 4, 5)
self.conv_width = conv(16, 1, 5)
def forward(self, input_0):
primals_1 = self.encoder.conv1.weight
primals_2 = self.encoder.conv1.bias
primals_4 = self.encoder.conv2.weight
primals_5 = self.encoder.conv2.bias
primals_6 = self.encoder.conv3.weight
primals_7 = self.encoder.conv3.bias
primals_8 = self.decoder.conv1.weight
primals_9 = self.decoder.conv1.bias
primals_10 = self.decoder.conv2.weight
primals_11 = self.decoder.conv2.bias
primals_12 = self.decoder.conv3.weight
primals_13 = self.decoder.conv3.bias
primals_14 = self.conv_qual.weight
primals_15 = self.conv_qual.bias
primals_16 = self.conv_rot.weight
primals_17 = self.conv_rot.bias
primals_18 = self.conv_width.weight
primals_19 = self.conv_width.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0], output[1], output[2]
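# Minimal usage sketch (an assumption-laden example, not part of the original
# module): the size guards in call() fix the input to a batch of four
# single-channel 64^3 volumes on CUDA.
#     net = ConvNetNew().cuda()
#     qual, rot, width = net(torch.rand(4, 1, 64, 64, 64, device='cuda'))
#     # qual: (4, 1, 40, 40, 40) in (0, 1); rot: (4, 4, 40, 40, 40),
#     # unit-norm over the channel dim; width: (4, 1, 40, 40, 40), raw.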
| runeg96/vgn | ConvNet | false | 16,377 | [
"BSD-3-Clause"
] | 92 | 24278b80935f2a9cd51d20c9e2c5bfe6da4ce53a | https://github.com/runeg96/vgn/tree/24278b80935f2a9cd51d20c9e2c5bfe6da4ce53a |
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/rl/crl46i56eoov7z55pyaubuvfdftw6jjpujz5bzjjfjrrm66em5i2.py
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add_2, sum_2, sum_3, add, union, truediv, loss, loss_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub, aten.mean]
# Source node to ATen node mapping:
# add => add
# add_2 => add_2
# intersection => sum_1
# loss => sub
# loss_1 => mean
# mul => mul
# mul_1 => mul_1
# sum_2 => sum_2
# sum_3 => sum_3
# truediv => div
# union => add_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 0.001), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 0.001), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, %add_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
triton_per_fused_add_div_mean_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 0.001
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = 1.0
tmp20 = tmp19 - tmp18
tmp21 = tmp20 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, ), (1, ), torch.float32)
buf3 = reinterpret_tensor(buf0, (), (), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add_2, sum_2, sum_3, add, union, truediv, loss, loss_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.hub
def soft_dice_loss(outputs, targets, per_image=False, reduce=True,
        ohpm=False, ohpm_pixels=256 * 256):
batch_size = outputs.size()[0]
eps = 0.001
if not per_image:
batch_size = 1
if ohpm:
dice_target = targets.contiguous().view(-1).float()
dice_output = outputs.contiguous().view(-1)
loss_b = torch.abs(dice_target - dice_output)
_, indc = loss_b.topk(ohpm_pixels)
dice_target = dice_target[indc]
dice_output = dice_output[indc]
intersection = torch.sum(dice_output * dice_target)
union = torch.sum(dice_output) + torch.sum(dice_target) + eps
loss = 1 - (2 * intersection + eps) / union
loss = loss.mean()
else:
dice_target = targets.contiguous().view(batch_size, -1).float()
dice_output = outputs.contiguous().view(batch_size, -1)
intersection = torch.sum(dice_output * dice_target, dim=1)
        union = (torch.sum(dice_output, dim=1) +
            torch.sum(dice_target, dim=1) + eps)
loss = 1 - (2 * intersection + eps) / union
if reduce:
loss = loss.mean()
return loss
class DiceLoss(nn.Module):
def __init__(self, weight=None, size_average=True, per_image=False,
ohpm=False, ohpm_pixels=256 * 256):
super().__init__()
self.size_average = size_average
self.register_buffer('weight', weight)
self.per_image = per_image
self.ohpm = ohpm
self.ohpm_pixels = ohpm_pixels
def forward(self, input, target):
        return soft_dice_loss(input, target, per_image=self.per_image,
            ohpm=self.ohpm, ohpm_pixels=self.ohpm_pixels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.hub
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 0.001
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = 1.0
tmp20 = tmp19 - tmp18
tmp21 = tmp20 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
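# Added commentary: this single persistent-reduction kernel fuses the whole
# dice loss. With o = outputs.view(-1) and t = targets.view(-1) it accumulates
# sum(o * t), sum(o) and sum(t) in one pass over all 256 elements, then stores
#     1 - (2 * sum(o * t) + 0.001) / (sum(o) + sum(t) + 0.001)
# (the trailing division by 1.0 is the degenerate mean over one element).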
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1,), (1,), torch.float32)
buf3 = reinterpret_tensor(buf0, (), (), 0)
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
def soft_dice_loss(outputs, targets, per_image=False, reduce=True,
        ohpm=False, ohpm_pixels=256 * 256):
batch_size = outputs.size()[0]
eps = 0.001
if not per_image:
batch_size = 1
if ohpm:
dice_target = targets.contiguous().view(-1).float()
dice_output = outputs.contiguous().view(-1)
loss_b = torch.abs(dice_target - dice_output)
_, indc = loss_b.topk(ohpm_pixels)
dice_target = dice_target[indc]
dice_output = dice_output[indc]
intersection = torch.sum(dice_output * dice_target)
union = torch.sum(dice_output) + torch.sum(dice_target) + eps
loss = 1 - (2 * intersection + eps) / union
loss = loss.mean()
else:
dice_target = targets.contiguous().view(batch_size, -1).float()
dice_output = outputs.contiguous().view(batch_size, -1)
intersection = torch.sum(dice_output * dice_target, dim=1)
        union = (torch.sum(dice_output, dim=1) +
            torch.sum(dice_target, dim=1) + eps)
loss = 1 - (2 * intersection + eps) / union
if reduce:
loss = loss.mean()
return loss
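# Worked sanity check (hypothetical values): for outputs == targets, both all
# ones with N elements and per_image=False, intersection == N and
# union == 2 * N + eps, so loss == 1 - (2 * N + eps) / (2 * N + eps) == 0,
# i.e. a perfect prediction scores zero loss.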
class DiceLossNew(nn.Module):
def __init__(self, weight=None, size_average=True, per_image=False,
ohpm=False, ohpm_pixels=256 * 256):
super().__init__()
self.size_average = size_average
self.register_buffer('weight', weight)
self.per_image = per_image
self.ohpm = ohpm
self.ohpm_pixels = ohpm_pixels
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
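# Minimal usage sketch (assumes a CUDA device; the size guards in call() fix
# both inputs to shape (4, 4, 4, 4)):
#     criterion = DiceLossNew().cuda()
#     loss = criterion(torch.rand(4, 4, 4, 4, device='cuda'),
#                      torch.rand(4, 4, 4, 4, device='cuda'))  # 0-dim tensor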
| selimsef/xview2_solution | DiceLoss | false | 16,378 | [
"Apache-2.0"
] | 57 | 5d0caba9c7a9c2707565a189f1a091c86d26b546 | https://github.com/selimsef/xview2_solution/tree/5d0caba9c7a9c2707565a189f1a091c86d26b546 |
RNNCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3l/c3lo77c7wjxasxrhtr6wesb72ods2d2rxnxhbfieun7j2wukm3wn.py
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# combined => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ey/cey6dsgmzj2byupf73e6nwt5fetf5ne2sa57kzcmy7ejvaqhqb72.py
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hidden => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 128, grid=grid(128), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_4, buf3, 64, grid=grid(64), stream=stream0)
del primals_4
return (buf2, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class RNNCell(nn.Module):
def __init__(self, embed_dim, hidden_size, vocab_dim):
super().__init__()
self.hidden_size = hidden_size
self.input2hidden = nn.Linear(embed_dim + hidden_size, hidden_size)
def forward(self, inputs, hidden):
combined = torch.cat((inputs, hidden), 2)
hidden = torch.relu(self.input2hidden(combined))
return hidden
def init_hidden(self, batch_size):
return torch.zeros(1, batch_size, self.hidden_size)
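# Minimal usage sketch (hypothetical sizes): one recurrent step over a
# (num_layers=1, batch, embed) input, matching the (1, batch, hidden) state
# produced by init_hidden.
#     cell = RNNCell(embed_dim=4, hidden_size=4, vocab_dim=4)
#     h = cell.init_hidden(batch_size=4)      # (1, 4, 4) zeros
#     h = cell(torch.rand(1, 4, 4), h)        # next hidden state, (1, 4, 4)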
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'hidden_size': 4, 'vocab_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf2,
primals_4, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
return buf2, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf3
class RNNCellNew(nn.Module):
def __init__(self, embed_dim, hidden_size, vocab_dim):
super().__init__()
self.hidden_size = hidden_size
self.input2hidden = nn.Linear(embed_dim + hidden_size, hidden_size)
def init_hidden(self, batch_size):
return torch.zeros(1, batch_size, self.hidden_size)
def forward(self, input_0, input_1):
primals_3 = self.input2hidden.weight
primals_4 = self.input2hidden.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
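# Minimal usage sketch (assumes a CUDA device; the size guards in call() fix
# both inputs to shape (4, 4, 4)):
#     cell = RNNCellNew(embed_dim=4, hidden_size=4, vocab_dim=4).cuda()
#     h = cell(torch.rand(4, 4, 4, device='cuda'),
#              torch.rand(4, 4, 4, device='cuda'))  # hidden, (4, 4, 4)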
| sdhnshu/HandsOnDeepLearningWithPytorch | RNNCell | false | 16,379 | [
"MIT"
] | 87 | 2292a952a4cb112b03d5db4048c78bc503eb858d | https://github.com/sdhnshu/HandsOnDeepLearningWithPytorch/tree/2292a952a4cb112b03d5db4048c78bc503eb858d |
Connection_Combination | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wx/cwxwvlntewdrqi2r4caciy5ht4jdvafnhtiqncr4lo4aegcb4imz.py
# Topologically Sorted Source Nodes: [betas], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# betas => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fj/cfjl47pvhwbpfbvh6rfehwy5ijxc5p3zgkld2lwf3mw5bl6pbkak.py
# Topologically Sorted Source Nodes: [betas], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# betas => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tc/ctcfsl4emgabshl7qx3ofgi7bses2jat5idvqnfbf22vjn5vbsum.py
# Topologically Sorted Source Nodes: [mul, mul_1, mul_2, mul_3, add, mul_4, mul_5, mix, mix_1], Original ATen: [aten.mul, aten.add, aten.relu]
# Source node to ATen node mapping:
# add => add
# mix => add_1
# mix_1 => relu
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, 3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %arg1_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, 3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %arg2_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, 3), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %arg3_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_5), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_poi_fused_add_mul_relu_2 = async_compile.triton('triton_poi_fused_add_mul_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp5 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (x2), xmask)
tmp10 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x2), xmask)
tmp1 = 3.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tmp11 = tmp10 * tmp1
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tmp15 = tl.full([1], 0, tl.int32)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [betas], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [betas], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, mul_1, mul_2, mul_3, add, mul_4, mul_5, mix, mix_1], Original ATen: [aten.mul, aten.add, aten.relu]
triton_poi_fused_add_mul_relu_2.run(buf1, arg1_1, arg2_1, arg3_1, buf2, 256, grid=grid(256), stream=stream0)
del arg1_1
del arg2_1
del arg3_1
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
class Connection_Combination(nn.Module):
"""combine 3 types of connection method by 'beta' weights to become an input node """
def __init__(self):
super(Connection_Combination, self).__init__()
def forward(self, prev_parallel, prev_above, prev_below, betas):
betas = F.softmax(betas, dim=-1)
        mix = (3 * betas[0] * prev_parallel + 3 * betas[1] * prev_above +
            3 * betas[2] * prev_below)
mix = F.relu(mix)
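        # Note (added): the factor 3 rescales the softmaxed weights so that
        # uniform betas (1/3 each) reproduce a plain unweighted sum of the
        # three branches.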
return mix
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.distributed
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mul_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp10 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x2, xmask)
tmp1 = 3.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tmp11 = tmp10 * tmp1
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tmp15 = tl.full([1], 0, tl.int32)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused_add_mul_relu_2[grid(256)](buf1, arg1_1, arg2_1,
arg3_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
del arg2_1
del arg3_1
del buf1
return buf2,
class Connection_CombinationNew(nn.Module):
"""combine 3 types of connection method by 'beta' weights to become an input node """
def __init__(self):
super(Connection_CombinationNew, self).__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
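# A minimal usage sketch (hypothetical, not part of the original record): the
# compiled forward expects four CUDA tensors of shape (4, 4, 4, 4). The first
# is softmax-normalized into 'beta' weights, which then gate the remaining
# three inputs inside the fused multiply-add + ReLU kernel.
if __name__ == "__main__" and torch.cuda.is_available():
    m = Connection_CombinationNew()
    beta = torch.rand(4, 4, 4, 4, device='cuda')
    x1, x2, x3 = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    out = m(beta, x1, x2, x3)  # -> (4, 4, 4, 4)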
| senyang-ml/PoseNFS | Connection_Combination | false | 16,380 | [
"MIT"
] | 53 | 1229abb69917dab1e57def3de0e3fe9a8a3164cd | https://github.com/senyang-ml/PoseNFS/tree/1229abb69917dab1e57def3de0e3fe9a8a3164cd |
FinalConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wt/cwtjehqlpgsottzm6ypzegm4s3jeoie6y6l4z5tnv4psexwmszst.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gh/cgh6bfy2bihruxswmk3lelq27ifjcz4y2cqp5igddznjqqf2443f.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
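# In plain terms: this kernel fuses the Conv1d bias add with ReLU and also
# writes out the boolean (activation <= 0) mask that autograd's
# threshold_backward consumes when back-propagating through the ReLU.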
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ui/cuis2tm475qjfv4oluq6cwaimye7xuh2oqrvlj26snvou655eypg.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
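# In plain terms: softmax(x)_j = exp(x_j - max(x)) / sum_k exp(x_k - max(x)).
# Subtracting the row max first keeps exp() from overflowing; this kernel
# emits the per-row max and the exp-sum, and triton_poi_fused__softmax_3
# below performs the final exp-and-divide.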
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tmp6 + tmp1
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp4 - tmp11
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tmp7 - tmp11
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp10 - tmp11
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tl.store(out_ptr0 + (x0), tmp11, xmask)
tl.store(out_ptr1 + (x0), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/62/c62kwlma2i4xddbarybouwfb3qsi45s55qat3qfrimxfo3wnty5s.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 4), (0, 4, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (1, 4, 4), (16, 4, 1))
buf2 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0); del buf1 # reuse
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_3, buf7, 16, grid=grid(16), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 4), (0, 4, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 4), (16, 4, 1))
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, primals_5, buf4, buf5, 4, grid=grid(4), stream=stream0)
buf6 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, primals_5, buf4, buf5, 16, grid=grid(16), stream=stream0)
del buf4
del buf5
del primals_5
return (buf6, primals_2, primals_4, reinterpret_tensor(buf0, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (1, 4, 4), (16, 4, 1), 0), buf6, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class FinalConv(torch.nn.Module):
def __init__(self, channels):
super().__init__()
self.conv1 = torch.nn.Conv1d(channels, channels, 1)
self.conv2 = torch.nn.Conv1d(channels, channels, 1)
self.relu = torch.nn.ReLU()
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, x):
x = self.relu(x)
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
return self.softmax(x)
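# A minimal smoke test (hypothetical, not part of the original): Softmax(dim=1)
# normalizes over the channel dimension, so the channel probabilities at each
# position sum to 1.
if __name__ == "__main__":
    head = FinalConv(channels=4)
    probs = head(torch.rand(2, 4, 16))  # (batch, channels, length)
    assert torch.allclose(probs.sum(dim=1), torch.ones(2, 16))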
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tmp6 + tmp1
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp4 - tmp11
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tmp7 - tmp11
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp10 - tmp11
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tl.store(out_ptr0 + x0, tmp11, xmask)
tl.store(out_ptr1 + x0, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(in_out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 4
), (0, 4, 1), 0), primals_2, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf1, (1, 4, 4), (16, 4, 1))
buf2 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0)
del buf1
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16)](buf2,
primals_3, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 4
), (0, 4, 1), 0), primals_4, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf3, (1, 4, 4), (16, 4, 1))
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused__softmax_2[grid(4)](buf3, primals_5, buf4, buf5, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0)
del buf3
triton_poi_fused__softmax_3[grid(16)](buf6, primals_5, buf4, buf5,
16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del buf5
del primals_5
return buf6, primals_2, primals_4, reinterpret_tensor(buf0, (1, 4, 4),
(16, 4, 1), 0), reinterpret_tensor(buf2, (1, 4, 4), (16, 4, 1), 0
), buf6, buf7
class FinalConvNew(torch.nn.Module):
def __init__(self, channels):
super().__init__()
self.conv1 = torch.nn.Conv1d(channels, channels, 1)
self.conv2 = torch.nn.Conv1d(channels, channels, 1)
self.relu = torch.nn.ReLU()
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| sdhnshu/HandsOnDeepLearningWithPytorch | FinalConv | false | 16,381 | [
"MIT"
] | 87 | 2292a952a4cb112b03d5db4048c78bc503eb858d | https://github.com/sdhnshu/HandsOnDeepLearningWithPytorch/tree/2292a952a4cb112b03d5db4048c78bc503eb858d |
Scale_B | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wh/cwhiqmxs6jarebbowg7sybonilbiobgdiwbw3bqoqnzgcchqkxol.py
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# result => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
return (buf0, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Scale_B(nn.Module):
"""
Learned per-channel scale factor, used to scale the noise
"""
def __init__(self, n_channel):
super().__init__()
self.weight = nn.Parameter(torch.zeros((1, n_channel, 1, 1)))
def forward(self, noise):
result = noise * self.weight
return result
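# A minimal smoke test (hypothetical, not part of the original): the weight is
# zero-initialized, so scaling the noise starts out as an exact no-op and the
# per-channel gains are learned during training.
if __name__ == "__main__":
    scale = Scale_B(n_channel=4)
    noise = torch.randn(2, 4, 8, 8)
    assert torch.equal(scale(noise), torch.zeros_like(noise))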
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channel': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
return buf0, primals_2
class Scale_BNew(nn.Module):
"""
Learned per-channel scale factor, used to scale the noise
"""
def __init__(self, n_channel):
super().__init__()
self.weight = nn.Parameter(torch.zeros((1, n_channel, 1, 1)))
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| sergkuzn148/stg | Scale_B | false | 16,382 | [
"MIT"
] | 96 | 84d9f53ae3665c423836a4d0176dc3b22de62b19 | https://github.com/sergkuzn148/stg/tree/84d9f53ae3665c423836a4d0176dc3b22de62b19 |
SConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/mr/cmrzofxtfa5fe3ax4o3n5qvgpvhbgcrspjauzarmp4t443npav4h.py
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# weight => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.1767766952966369), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
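    # 0.1767766952966369 == sqrt(2 / fan_in) with fan_in = 4 * 4 * 4 = 64;
    # ScaleW's runtime equalized-LR factor is folded in here as a constant.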
tmp1 = 0.1767766952966369
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %mul, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf2, buf0, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
def quick_scale(module, name='weight'):
ScaleW.apply(module, name)
return module
class ScaleW:
"""
Constructor: name - name of attribute to be scaled
"""
def __init__(self, name):
self.name = name
def scale(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * math.sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
"""
        Apply runtime scaling to a specific module
"""
hook = ScaleW(name)
weight = getattr(module, name)
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
del module._parameters[name]
module.register_forward_pre_hook(hook)
def __call__(self, module, whatever):
weight = self.scale(module)
setattr(module, self.name, weight)
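# A minimal illustration of the hook (hypothetical, not part of the original):
# the raw parameter lives under 'weight_orig', and 'weight' is recomputed as
# weight_orig * sqrt(2 / fan_in) by the pre-hook on every forward -- the
# equalized learning-rate trick from ProGAN/StyleGAN.
if __name__ == "__main__":
    lin = quick_scale(nn.Linear(4, 4))
    assert 'weight_orig' in dict(lin.named_parameters())
    lin(torch.randn(1, 4))  # triggers the pre-hook, which rebuilds 'weight'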
class SConv2d(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = quick_scale(conv)
def forward(self, x):
return self.conv(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.1767766952966369
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, buf0, primals_3, buf0
def quick_scale(module, name='weight'):
ScaleW.apply(module, name)
return module
class ScaleW:
"""
Constructor: name - name of attribute to be scaled
"""
def __init__(self, name):
self.name = name
def scale(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * math.sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
"""
        Apply runtime scaling to a specific module
"""
hook = ScaleW(name)
weight = getattr(module, name)
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
del module._parameters[name]
module.register_forward_pre_hook(hook)
def __call__(self, module, whatever):
weight = self.scale(module)
setattr(module, self.name, weight)
class SConv2dNew(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = quick_scale(conv)
def forward(self, input_0):
primals_2 = self.conv.bias
primals_1 = self.conv.weight_orig
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| sergkuzn148/stg | SConv2d | false | 16,383 | [
"MIT"
] | 96 | 84d9f53ae3665c423836a4d0176dc3b22de62b19 | https://github.com/sergkuzn148/stg/tree/84d9f53ae3665c423836a4d0176dc3b22de62b19 |
IntegrationModule | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/73/c73p32b23m4qxtuudtdayyjsgn5jjignb6di3ocugzndghffcrdx.py
# Topologically Sorted Source Nodes: [mul_2, mul_3, sub_2], Original ATen: [aten.mul, aten.sub]
# Source node to ATen node mapping:
# mul_2 => mul_2
# mul_3 => mul_3
# sub_2 => sub_2
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %unsqueeze_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %unsqueeze_5), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_3), kwargs = {})
triton_poi_fused_mul_sub_0 = async_compile.triton('triton_poi_fused_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex % 256
x4 = (xindex // 64)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (x3), None, eviction_policy='evict_last')
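    # inline form of compute_weight: clamp((iou - 0.2) / (1 - 0.2), 0, 1),
    # with 1.25 == 1 / (1 - 0.2) pre-folded into a multiply.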
tmp2 = 0.2
tmp3 = tmp1 - tmp2
tmp4 = 1.25
tmp5 = tmp3 * tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 1.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = tmp9 * tmp8
tmp11 = tmp0 * tmp10
tmp13 = tmp12 * tmp10
tmp14 = tmp11 - tmp13
tl.store(out_ptr0 + (x5), tmp14, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4, 4, 4), (4096, 1024, 256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, mul_3, sub_2], Original ATen: [aten.mul, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sub_0.run(arg1_1, arg0_1, arg2_1, buf0, 16384, grid=grid(16384), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class IntegrationModule(nn.Module):
def __init__(self, min_iou=0.2, enhance_weight_max=1.0,
reduce_weight_max=1.0):
super(IntegrationModule, self).__init__()
self.min_iou = min_iou
self.enhance_weight_max = enhance_weight_max
self.reduce_weight_max = reduce_weight_max
def forward(self, enhance_feature, reduce_feature, overlaps):
enhance_weight = self.compute_weight(overlaps, self.
enhance_weight_max, self.min_iou)
reduce_weight = self.compute_weight(overlaps, self.
reduce_weight_max, self.min_iou)
return enhance_feature * enhance_weight.unsqueeze(-1).unsqueeze(-1
).unsqueeze(-1) - reduce_feature * reduce_weight.unsqueeze(-1
).unsqueeze(-1).unsqueeze(-1)
def compute_weight(self, ious, weight_max, iou_min):
weight = weight_max * torch.min(torch.max((ious - iou_min) / (1.0 -
iou_min), torch.zeros_like(ious)), torch.ones_like(ious))
return weight
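    # Worked example: with iou_min=0.2 and weight_max=1.0, an IoU of 0.6 maps
    # to (0.6 - 0.2) / (1.0 - 0.2) = 0.5; IoUs <= 0.2 clamp to 0 and an IoU
    # of 1.0 saturates at weight_max.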
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex % 256
x4 = xindex // 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + x3, None, eviction_policy='evict_last')
tmp2 = 0.2
tmp3 = tmp1 - tmp2
tmp4 = 1.25
tmp5 = tmp3 * tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 1.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = tmp9 * tmp8
tmp11 = tmp0 * tmp10
tmp13 = tmp12 * tmp10
tmp14 = tmp11 - tmp13
tl.store(out_ptr0 + x5, tmp14, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4, 4, 4), (4096, 1024, 256,
64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sub_0[grid(16384)](arg1_1, arg0_1, arg2_1,
buf0, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class IntegrationModuleNew(nn.Module):
def __init__(self, min_iou=0.2, enhance_weight_max=1.0,
reduce_weight_max=1.0):
super(IntegrationModuleNew, self).__init__()
self.min_iou = min_iou
self.enhance_weight_max = enhance_weight_max
self.reduce_weight_max = reduce_weight_max
def compute_weight(self, ious, weight_max, iou_min):
weight = weight_max * torch.min(torch.max((ious - iou_min) / (1.0 -
iou_min), torch.zeros_like(ious)), torch.ones_like(ious))
return weight
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| sguo2908/TADAM | IntegrationModule | false | 16,384 | [
"MIT"
] | 47 | abd0b7422c3582e36c928778894cee8a159f896e | https://github.com/sguo2908/TADAM/tree/abd0b7422c3582e36c928778894cee8a159f896e |
FC_A | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/52/c526p7iwll7vx7gobeuv6q3lym4ek7lbhopuykpcibc57bou263i.py
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# weight => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.7071067811865476), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
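    # 0.7071067811865476 == sqrt(2 / fan_in) with fan_in = dim_latent = 4;
    # ScaleW's runtime equalized-LR factor is folded in here as a constant.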
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
return (reinterpret_tensor(buf1, (4, 4, 1, 1, 4, 8), (128, 32, 32, 32, 8, 1), 0), buf0, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
def quick_scale(module, name='weight'):
ScaleW.apply(module, name)
return module
class ScaleW:
"""
Constructor: name - name of attribute to be scaled
"""
def __init__(self, name):
self.name = name
def scale(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * math.sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
"""
        Apply runtime scaling to a specific module
"""
hook = ScaleW(name)
weight = getattr(module, name)
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
del module._parameters[name]
module.register_forward_pre_hook(hook)
def __call__(self, module, whatever):
weight = self.scale(module)
setattr(module, self.name, weight)
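# A minimal usage sketch of the hook mechanism above (editorial, illustrative
# only): after quick_scale(), `weight` is re-derived from `weight_orig` on
# every forward call via the registered forward pre-hook.
def _scalew_usage_sketch():
    lin = quick_scale(nn.Linear(4, 8))
    _ = lin(torch.randn(2, 4))  # pre-hook sets lin.weight = weight_orig * sqrt(2/4)
    return hasattr(lin, 'weight_orig')  # True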
class SLinear(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
linear = nn.Linear(dim_in, dim_out)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = quick_scale(linear)
def forward(self, x):
return self.linear(x)
class FC_A(nn.Module):
"""
    Learned affine transform A; this module is used to transform the
    intermediate vector w into a style vector
"""
def __init__(self, dim_latent, n_channel):
super().__init__()
self.transform = SLinear(dim_latent, n_channel * 2)
self.transform.linear.bias.data[:n_channel] = 1
self.transform.linear.bias.data[n_channel:] = 0
def forward(self, w):
style = self.transform(w).unsqueeze(2).unsqueeze(3)
return style
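# Shape note (editorial, not from the source repo): for w of shape
# (B, dim_latent) the style tensor is (B, 2 * n_channel, 1, 1); the bias init
# above centers the affine map so that w = 0 yields scale = 1 for the first
# n_channel channels and shift = 0 for the rest, a common AdaIN-style default.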
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_latent': 4, 'n_channel': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(32)](primals_1, buf0, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_2
return reinterpret_tensor(buf1, (4, 4, 1, 1, 4, 8), (128, 32, 32, 32, 8,
1), 0), buf0, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
def quick_scale(module, name='weight'):
ScaleW.apply(module, name)
return module
class ScaleW:
"""
    Constructor: name - name of the attribute to be scaled
"""
def __init__(self, name):
self.name = name
def scale(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * math.sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
"""
        Apply runtime scaling to a specific module
"""
hook = ScaleW(name)
weight = getattr(module, name)
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
del module._parameters[name]
module.register_forward_pre_hook(hook)
def __call__(self, module, whatever):
weight = self.scale(module)
setattr(module, self.name, weight)
class SLinear(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
linear = nn.Linear(dim_in, dim_out)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = quick_scale(linear)
def forward(self, x):
return self.linear(x)
class FC_ANew(nn.Module):
"""
    Learned affine transform A; this module is used to transform the
    intermediate vector w into a style vector
"""
def __init__(self, dim_latent, n_channel):
super().__init__()
self.transform = SLinear(dim_latent, n_channel * 2)
self.transform.linear.bias.data[:n_channel] = 1
self.transform.linear.bias.data[n_channel:] = 0
def forward(self, input_0):
primals_2 = self.transform.linear.bias
primals_1 = self.transform.linear.weight_orig
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
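# Editorial note: in this compiled path the ScaleW forward pre-hook never
# fires; its sqrt(2 / fan_in) weight scaling is reproduced by
# triton_poi_fused_mul_0 acting on `weight_orig` inside call(), so eager and
# compiled outputs should match up to floating-point order of operations.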
| sergkuzn148/stg | FC_A | false | 16,385 | [
"MIT"
] | 96 | 84d9f53ae3665c423836a4d0176dc3b22de62b19 | https://github.com/sergkuzn148/stg/tree/84d9f53ae3665c423836a4d0176dc3b22de62b19 |
SLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/oy/coy4v6ev22tv33nc6asaz3obrskaw2f3vho4q3aj4yqpth7c2y2m.py
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# weight => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.7071067811865476), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf0, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
def quick_scale(module, name='weight'):
ScaleW.apply(module, name)
return module
class ScaleW:
"""
    Constructor: name - name of the attribute to be scaled
"""
def __init__(self, name):
self.name = name
def scale(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * math.sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
"""
        Apply runtime scaling to a specific module
"""
hook = ScaleW(name)
weight = getattr(module, name)
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
del module._parameters[name]
module.register_forward_pre_hook(hook)
def __call__(self, module, whatever):
weight = self.scale(module)
setattr(module, self.name, weight)
class SLinear(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
linear = nn.Linear(dim_in, dim_out)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = quick_scale(linear)
def forward(self, x):
return self.linear(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_2
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf0, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
def quick_scale(module, name='weight'):
ScaleW.apply(module, name)
return module
class ScaleW:
"""
    Constructor: name - name of the attribute to be scaled
"""
def __init__(self, name):
self.name = name
def scale(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * math.sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
"""
        Apply runtime scaling to a specific module
"""
hook = ScaleW(name)
weight = getattr(module, name)
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
del module._parameters[name]
module.register_forward_pre_hook(hook)
def __call__(self, module, whatever):
weight = self.scale(module)
setattr(module, self.name, weight)
class SLinearNew(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
linear = nn.Linear(dim_in, dim_out)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = quick_scale(linear)
def forward(self, input_0):
primals_2 = self.linear.bias
primals_1 = self.linear.weight_orig
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| sergkuzn148/stg | SLinear | false | 16,386 | [
"MIT"
] | 96 | 84d9f53ae3665c423836a4d0176dc3b22de62b19 | https://github.com/sergkuzn148/stg/tree/84d9f53ae3665c423836a4d0176dc3b22de62b19 |
Sinkhorn_Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/sf/csf7rcpf2nmlcgbtaotve4ywrouvruwuzvecg4aeyywsga23hgpe.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
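# Eager reference for the fused kernel above (an editorial sketch, not
# Inductor output): the bias add and ReLU are folded into one in-place pass
# over the extern mm result, i.e. relu(x @ W1.T + b1) for linear1 + relu1.
def _fused_relu_reference(mm_out, bias):
    # mm_out: (256, 4) output of extern_kernels.mm; bias: (4,), broadcast per row
    return torch.relu(mm_out + bias)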
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (256, 1), (1, 1), 0), reinterpret_tensor(primals_2, (1, 4), (1, 1), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 1024, grid=grid(1024), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (256, 1), (1, 1), 0), buf1, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.cuda
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class Features(nn.Module):
def __init__(self, latent_dim, output_dim, dropout_prob):
"""
In the constructor we instantiate two nn.Linear modules and assign them as
member variables.
        This Feature extractor class takes an input and constructs a feature vector. It can be applied independently to all elements of the input sequence.
in_flattened_vector: input flattened vector
latent_dim: number of neurons in latent layer
output_dim: dimension of log alpha square matrix
"""
super().__init__()
self.linear1 = nn.Linear(1, latent_dim)
self.relu1 = nn.ReLU()
self.d1 = nn.Dropout(p=dropout_prob)
self.linear2 = nn.Linear(latent_dim, output_dim)
self.d2 = nn.Dropout(p=dropout_prob)
def forward(self, x):
"""
In the forward function we accept a Variable of input data and we must
return a Variable of output data. We can use Modules defined in the
constructor as well as arbitrary operators on Variables.
x: Tensor of shape (batch_size, 1)
"""
x = self.d1(self.relu1(self.linear1(x)))
x = self.d2(self.linear2(x))
return x
class Sinkhorn_Net(nn.Module):
def __init__(self, latent_dim, output_dim, dropout_prob):
super().__init__()
self.output_dim = output_dim
self.features = Features(latent_dim, output_dim, dropout_prob)
def forward(self, x):
"""
        x: Tensor of shape (batch, sequence_length)
Note that output_dim should correspond to the intended sequence length
"""
x = x.view(-1, 1)
x = self.features(x)
x = x.reshape(-1, self.output_dim, self.output_dim)
return x
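# Shape trace for the default test input (editorial illustration):
#   x: (4, 4, 4, 4) --view(-1, 1)--> (256, 1) --linear1--> (256, 4)
#   --relu/dropout/linear2--> (256, 4) --reshape(-1, 4, 4)--> (64, 4, 4)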
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'latent_dim': 4, 'output_dim': 4, 'dropout_prob': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.cuda
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (256, 1), (1, 1), 0
), reinterpret_tensor(primals_2, (1, 4), (1, 1), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (256, 1), (1, 1), 0), buf1, primals_4
class Features(nn.Module):
def __init__(self, latent_dim, output_dim, dropout_prob):
"""
In the constructor we instantiate two nn.Linear modules and assign them as
member variables.
        This Feature extractor class takes an input and constructs a feature vector. It can be applied independently to all elements of the input sequence.
in_flattened_vector: input flattened vector
latent_dim: number of neurons in latent layer
output_dim: dimension of log alpha square matrix
"""
super().__init__()
self.linear1 = nn.Linear(1, latent_dim)
self.relu1 = nn.ReLU()
self.d1 = nn.Dropout(p=dropout_prob)
self.linear2 = nn.Linear(latent_dim, output_dim)
self.d2 = nn.Dropout(p=dropout_prob)
def forward(self, x):
"""
In the forward function we accept a Variable of input data and we must
return a Variable of output data. We can use Modules defined in the
constructor as well as arbitrary operators on Variables.
x: Tensor of shape (batch_size, 1)
"""
x = self.d1(self.relu1(self.linear1(x)))
x = self.d2(self.linear2(x))
return x
class Sinkhorn_NetNew(nn.Module):
def __init__(self, latent_dim, output_dim, dropout_prob):
super().__init__()
self.output_dim = output_dim
self.features = Features(latent_dim, output_dim, dropout_prob)
def forward(self, input_0):
primals_2 = self.features.linear1.weight
primals_3 = self.features.linear1.bias
primals_4 = self.features.linear2.weight
primals_5 = self.features.linear2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| sfox14/butterfly | Sinkhorn_Net | false | 16,387 | [
"Apache-2.0"
] | 52 | 13cc15cee5bdb7adaf376219aaf20fab0459e9ef | https://github.com/sfox14/butterfly/tree/13cc15cee5bdb7adaf376219aaf20fab0459e9ef |
LowRankConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/rz/crzkuzagx7fu4niiz56iqwfhlsterez4gyzrhx7mj3sxap33xxbi.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %view, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
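# Editorial note: this copy kernel materializes
# bmm(H, G).permute(1, 2, 0).reshape(C_out, C_in, k, k) as a plain 16 x 16
# transpose: it reads buf0 in its (k*k, C_out*C_in) layout and writes the
# (C_out*C_in, k*k) layout, so the extern convolution below receives a
# contiguous (4, 4, 4, 4) weight tensor.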
# kernel path: runs/run_shard_0/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %view, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (16, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_1, primals_2, out=buf0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf0, buf1, 16, 16, grid=grid(16, 16), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_4, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
del buf1
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
return (buf3, primals_4, reinterpret_tensor(buf0, (4, 4, 4, 4), (4, 1, 64, 16), 0), reinterpret_tensor(primals_1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(primals_2, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
import torch.nn.functional as F
import torch.cuda
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class LowRankConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, bias=True, rank=1):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = (kernel_size, kernel_size) if isinstance(kernel_size
, int) else kernel_size
self.stride = (stride, stride) if isinstance(stride, int) else stride
self.padding = (padding, padding) if isinstance(padding, int
) else padding
self.dilation = (dilation, dilation) if isinstance(dilation, int
) else dilation
self.rank = rank
self.G = nn.Parameter(torch.Tensor(self.kernel_size[0] * self.
kernel_size[1], self.rank, self.in_channels))
self.H = nn.Parameter(torch.Tensor(self.kernel_size[0] * self.
kernel_size[1], self.out_channels, self.rank))
if bias:
self.bias = nn.Parameter(torch.Tensor(self.out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
fan_in, _fan_out = self.in_channels, self.out_channels
nn.init.uniform_(self.G, -1 / math.sqrt(fan_in), 1 / math.sqrt(fan_in))
nn.init.uniform_(self.H, -1 / math.sqrt(self.rank), 1 / math.sqrt(
self.rank))
if self.bias is not None:
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, x):
M = torch.bmm(self.H, self.G).permute(1, 2, 0).reshape(self.
out_channels, self.in_channels, *self.kernel_size)
return F.conv2d(x, M, self.bias, self.stride, self.padding, self.
dilation)
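# Worked shapes for the default config (editorial illustration, rank r = 1):
#   H: (k*k, C_out, r) = (16, 4, 1);  G: (k*k, r, C_in) = (16, 1, 4)
#   bmm(H, G): (16, 4, 4) --permute(1, 2, 0)--> (4, 4, 16) --reshape--> (4, 4, 4, 4)
# Every spatial tap M[:, :, i, j] is a rank-r C_out x C_in matrix, which is
# the sense in which the convolution is "low rank".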
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
import torch.cuda
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (16, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_1, primals_2, out=buf0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 16)](buf0, buf1, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(primals_4, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
del buf1
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf3, primals_4, reinterpret_tensor(buf0, (4, 4, 4, 4), (4, 1,
64, 16), 0), reinterpret_tensor(primals_1, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(primals_2, (16, 4, 1), (4, 1, 4), 0)
class LowRankConv2dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, bias=True, rank=1):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = (kernel_size, kernel_size) if isinstance(kernel_size
, int) else kernel_size
self.stride = (stride, stride) if isinstance(stride, int) else stride
self.padding = (padding, padding) if isinstance(padding, int
) else padding
self.dilation = (dilation, dilation) if isinstance(dilation, int
) else dilation
self.rank = rank
self.G = nn.Parameter(torch.Tensor(self.kernel_size[0] * self.
kernel_size[1], self.rank, self.in_channels))
self.H = nn.Parameter(torch.Tensor(self.kernel_size[0] * self.
kernel_size[1], self.out_channels, self.rank))
if bias:
self.bias = nn.Parameter(torch.Tensor(self.out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
fan_in, _fan_out = self.in_channels, self.out_channels
nn.init.uniform_(self.G, -1 / math.sqrt(fan_in), 1 / math.sqrt(fan_in))
nn.init.uniform_(self.H, -1 / math.sqrt(self.rank), 1 / math.sqrt(
self.rank))
if self.bias is not None:
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input_0):
primals_2 = self.G
primals_1 = self.H
primals_3 = self.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| sfox14/butterfly | LowRankConv2d | false | 16,388 | [
"Apache-2.0"
] | 52 | 13cc15cee5bdb7adaf376219aaf20fab0459e9ef | https://github.com/sfox14/butterfly/tree/13cc15cee5bdb7adaf376219aaf20fab0459e9ef |
MSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/po/cpo32gb5lvcyrurzwgtqq3rv7yd534imnt5i3lvf2d2xfpunysix.py
# Topologically Sorted Source Nodes: [mul, sub, pow_1, mean, mean_1, losses, mean_2, back_loss], Original ATen: [aten.mul, aten.sub, aten.pow, aten.mean]
# Source node to ATen node mapping:
# back_loss => mean_3
# losses => mul_1
# mean => mean
# mean_1 => mean_1
# mean_2 => mean_2
# mul => mul
# pow_1 => pow_1
# sub => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg2_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [3]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [2]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %mean_1), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_1, [1]), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_2, [0]), kwargs = {})
triton_poi_fused_mean_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 48, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (16*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr1 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr1 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr1 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp52 = tl.load(in_ptr1 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr1 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp63 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr1 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp67 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp68 = tl.load(in_ptr1 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp72 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp73 = tl.load(in_ptr1 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp77 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp78 = tl.load(in_ptr1 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr2 + (x0), xmask)
tmp89 = tl.load(in_ptr2 + (16 + x0), xmask)
tmp93 = tl.load(in_ptr2 + (32 + x0), xmask)
tmp97 = tl.load(in_ptr2 + (48 + x0), xmask)
tmp102 = tl.load(in_ptr2 + (64 + x0), xmask)
tmp105 = tl.load(in_ptr2 + (80 + x0), xmask)
tmp109 = tl.load(in_ptr2 + (96 + x0), xmask)
tmp113 = tl.load(in_ptr2 + (112 + x0), xmask)
tmp119 = tl.load(in_ptr2 + (128 + x0), xmask)
tmp122 = tl.load(in_ptr2 + (144 + x0), xmask)
tmp126 = tl.load(in_ptr2 + (160 + x0), xmask)
tmp130 = tl.load(in_ptr2 + (176 + x0), xmask)
tmp136 = tl.load(in_ptr2 + (192 + x0), xmask)
tmp139 = tl.load(in_ptr2 + (208 + x0), xmask)
tmp143 = tl.load(in_ptr2 + (224 + x0), xmask)
tmp147 = tl.load(in_ptr2 + (240 + x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 4.0
tmp20 = tmp18 / tmp19
tmp23 = tmp21 - tmp22
tmp24 = tmp23 * tmp23
tmp27 = tmp25 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp24 + tmp28
tmp32 = tmp30 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp29 + tmp33
tmp37 = tmp35 - tmp36
tmp38 = tmp37 * tmp37
tmp39 = tmp34 + tmp38
tmp40 = tmp39 / tmp19
tmp41 = tmp20 + tmp40
tmp44 = tmp42 - tmp43
tmp45 = tmp44 * tmp44
tmp48 = tmp46 - tmp47
tmp49 = tmp48 * tmp48
tmp50 = tmp45 + tmp49
tmp53 = tmp51 - tmp52
tmp54 = tmp53 * tmp53
tmp55 = tmp50 + tmp54
tmp58 = tmp56 - tmp57
tmp59 = tmp58 * tmp58
tmp60 = tmp55 + tmp59
tmp61 = tmp60 / tmp19
tmp62 = tmp41 + tmp61
tmp65 = tmp63 - tmp64
tmp66 = tmp65 * tmp65
tmp69 = tmp67 - tmp68
tmp70 = tmp69 * tmp69
tmp71 = tmp66 + tmp70
tmp74 = tmp72 - tmp73
tmp75 = tmp74 * tmp74
tmp76 = tmp71 + tmp75
tmp79 = tmp77 - tmp78
tmp80 = tmp79 * tmp79
tmp81 = tmp76 + tmp80
tmp82 = tmp81 / tmp19
tmp83 = tmp62 + tmp82
tmp84 = tmp83 / tmp19
tmp86 = 0.5
tmp87 = tmp85 * tmp86
tmp88 = tmp87 * tmp84
tmp90 = tmp89 * tmp86
tmp91 = tmp90 * tmp84
tmp92 = tmp88 + tmp91
tmp94 = tmp93 * tmp86
tmp95 = tmp94 * tmp84
tmp96 = tmp92 + tmp95
tmp98 = tmp97 * tmp86
tmp99 = tmp98 * tmp84
tmp100 = tmp96 + tmp99
tmp101 = tmp100 / tmp19
tmp103 = tmp102 * tmp86
tmp104 = tmp103 * tmp84
tmp106 = tmp105 * tmp86
tmp107 = tmp106 * tmp84
tmp108 = tmp104 + tmp107
tmp110 = tmp109 * tmp86
tmp111 = tmp110 * tmp84
tmp112 = tmp108 + tmp111
tmp114 = tmp113 * tmp86
tmp115 = tmp114 * tmp84
tmp116 = tmp112 + tmp115
tmp117 = tmp116 / tmp19
tmp118 = tmp101 + tmp117
tmp120 = tmp119 * tmp86
tmp121 = tmp120 * tmp84
tmp123 = tmp122 * tmp86
tmp124 = tmp123 * tmp84
tmp125 = tmp121 + tmp124
tmp127 = tmp126 * tmp86
tmp128 = tmp127 * tmp84
tmp129 = tmp125 + tmp128
tmp131 = tmp130 * tmp86
tmp132 = tmp131 * tmp84
tmp133 = tmp129 + tmp132
tmp134 = tmp133 / tmp19
tmp135 = tmp118 + tmp134
tmp137 = tmp136 * tmp86
tmp138 = tmp137 * tmp84
tmp140 = tmp139 * tmp86
tmp141 = tmp140 * tmp84
tmp142 = tmp138 + tmp141
tmp144 = tmp143 * tmp86
tmp145 = tmp144 * tmp84
tmp146 = tmp142 + tmp145
tmp148 = tmp147 * tmp86
tmp149 = tmp148 * tmp84
tmp150 = tmp146 + tmp149
tmp151 = tmp150 / tmp19
tmp152 = tmp135 + tmp151
tmp153 = tmp152 / tmp19
tl.store(in_out_ptr0 + (x0), tmp153, xmask)
''', device_str='cuda')
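# Editorial note: Inductor fully unrolled this reduction; each of the 16
# program elements averages one 4 x 4 slice of (in_ptr0 - in_ptr1) ** 2 (the
# two inner .mean() calls) and then averages the 16 matching 0.5 * weight
# terms (the outer .mean(dim=1).mean(dim=0)), reproducing the eager chain
#   (0.5 * weight * ((preds - heatmap_gt) ** 2).mean(3).mean(2)).mean(1).mean(0)
# in a single pass per output element.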
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, sub, pow_1, mean, mean_1, losses, mean_2, back_loss], Original ATen: [aten.mul, aten.sub, aten.pow, aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_mul_pow_sub_0.run(buf1, arg1_1, arg2_1, arg0_1, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.distributed
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
class MSELoss(torch.nn.Module):
def __init__(self):
super(MSELoss, self).__init__()
def forward(self, preds, heatmap_gt, weight):
losses = 0.5 * weight * ((preds - heatmap_gt) ** 2).mean(dim=3).mean(
dim=2)
back_loss = losses.mean(dim=1).mean(dim=0)
return back_loss
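# Broadcasting note (editorial): with 4-D `weight`, the (batch, joint) means
# from .mean(dim=3).mean(dim=2) broadcast against the trailing two dims of
# `weight`, so back_loss comes out as a (4, 4) tensor here rather than a
# scalar; a (batch, joint)-shaped weight would reduce it to a scalar.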
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.distributed
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 16 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr1 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr1 + (2 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr1 + (3 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr1 + (4 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr1 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp31 = tl.load(in_ptr1 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp36 = tl.load(in_ptr1 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp42 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp43 = tl.load(in_ptr1 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp46 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp47 = tl.load(in_ptr1 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp51 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp52 = tl.load(in_ptr1 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp56 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp57 = tl.load(in_ptr1 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp63 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp64 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp67 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp68 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp72 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp73 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp77 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp78 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp85 = tl.load(in_ptr2 + x0, xmask)
tmp89 = tl.load(in_ptr2 + (16 + x0), xmask)
tmp93 = tl.load(in_ptr2 + (32 + x0), xmask)
tmp97 = tl.load(in_ptr2 + (48 + x0), xmask)
tmp102 = tl.load(in_ptr2 + (64 + x0), xmask)
tmp105 = tl.load(in_ptr2 + (80 + x0), xmask)
tmp109 = tl.load(in_ptr2 + (96 + x0), xmask)
tmp113 = tl.load(in_ptr2 + (112 + x0), xmask)
tmp119 = tl.load(in_ptr2 + (128 + x0), xmask)
tmp122 = tl.load(in_ptr2 + (144 + x0), xmask)
tmp126 = tl.load(in_ptr2 + (160 + x0), xmask)
tmp130 = tl.load(in_ptr2 + (176 + x0), xmask)
tmp136 = tl.load(in_ptr2 + (192 + x0), xmask)
tmp139 = tl.load(in_ptr2 + (208 + x0), xmask)
tmp143 = tl.load(in_ptr2 + (224 + x0), xmask)
tmp147 = tl.load(in_ptr2 + (240 + x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 4.0
tmp20 = tmp18 / tmp19
tmp23 = tmp21 - tmp22
tmp24 = tmp23 * tmp23
tmp27 = tmp25 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp24 + tmp28
tmp32 = tmp30 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp29 + tmp33
tmp37 = tmp35 - tmp36
tmp38 = tmp37 * tmp37
tmp39 = tmp34 + tmp38
tmp40 = tmp39 / tmp19
tmp41 = tmp20 + tmp40
tmp44 = tmp42 - tmp43
tmp45 = tmp44 * tmp44
tmp48 = tmp46 - tmp47
tmp49 = tmp48 * tmp48
tmp50 = tmp45 + tmp49
tmp53 = tmp51 - tmp52
tmp54 = tmp53 * tmp53
tmp55 = tmp50 + tmp54
tmp58 = tmp56 - tmp57
tmp59 = tmp58 * tmp58
tmp60 = tmp55 + tmp59
tmp61 = tmp60 / tmp19
tmp62 = tmp41 + tmp61
tmp65 = tmp63 - tmp64
tmp66 = tmp65 * tmp65
tmp69 = tmp67 - tmp68
tmp70 = tmp69 * tmp69
tmp71 = tmp66 + tmp70
tmp74 = tmp72 - tmp73
tmp75 = tmp74 * tmp74
tmp76 = tmp71 + tmp75
tmp79 = tmp77 - tmp78
tmp80 = tmp79 * tmp79
tmp81 = tmp76 + tmp80
tmp82 = tmp81 / tmp19
tmp83 = tmp62 + tmp82
tmp84 = tmp83 / tmp19
tmp86 = 0.5
tmp87 = tmp85 * tmp86
tmp88 = tmp87 * tmp84
tmp90 = tmp89 * tmp86
tmp91 = tmp90 * tmp84
tmp92 = tmp88 + tmp91
tmp94 = tmp93 * tmp86
tmp95 = tmp94 * tmp84
tmp96 = tmp92 + tmp95
tmp98 = tmp97 * tmp86
tmp99 = tmp98 * tmp84
tmp100 = tmp96 + tmp99
tmp101 = tmp100 / tmp19
tmp103 = tmp102 * tmp86
tmp104 = tmp103 * tmp84
tmp106 = tmp105 * tmp86
tmp107 = tmp106 * tmp84
tmp108 = tmp104 + tmp107
tmp110 = tmp109 * tmp86
tmp111 = tmp110 * tmp84
tmp112 = tmp108 + tmp111
tmp114 = tmp113 * tmp86
tmp115 = tmp114 * tmp84
tmp116 = tmp112 + tmp115
tmp117 = tmp116 / tmp19
tmp118 = tmp101 + tmp117
tmp120 = tmp119 * tmp86
tmp121 = tmp120 * tmp84
tmp123 = tmp122 * tmp86
tmp124 = tmp123 * tmp84
tmp125 = tmp121 + tmp124
tmp127 = tmp126 * tmp86
tmp128 = tmp127 * tmp84
tmp129 = tmp125 + tmp128
tmp131 = tmp130 * tmp86
tmp132 = tmp131 * tmp84
tmp133 = tmp129 + tmp132
tmp134 = tmp133 / tmp19
tmp135 = tmp118 + tmp134
tmp137 = tmp136 * tmp86
tmp138 = tmp137 * tmp84
tmp140 = tmp139 * tmp86
tmp141 = tmp140 * tmp84
tmp142 = tmp138 + tmp141
tmp144 = tmp143 * tmp86
tmp145 = tmp144 * tmp84
tmp146 = tmp142 + tmp145
tmp148 = tmp147 * tmp86
tmp149 = tmp148 * tmp84
tmp150 = tmp146 + tmp149
tmp151 = tmp150 / tmp19
tmp152 = tmp135 + tmp151
tmp153 = tmp152 / tmp19
tl.store(in_out_ptr0 + x0, tmp153, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_mean_mul_pow_sub_0[grid(16)](buf1, arg1_1, arg2_1,
arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class MSELossNew(torch.nn.Module):
def __init__(self):
super(MSELossNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
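# Hedged eager-mode sketch of what the fused kernel above appears to compute,
# reverse-engineered from its loads/stores (the names and the weighting scheme
# are assumptions, not the repo's reference implementation):
def mse_loss_reference_sketch(weight, pred, target):
    # mean squared error over the trailing 4x4 plane, per leading position
    per_pos = ((pred - target) ** 2).reshape(16, 16).mean(dim=1)
    # the kernel reads the weight tensor transposed (offset x0 + 16*j), scales
    # it by 0.5, multiplies by the per-position MSE, then averages the 16 rows
    w = weight.reshape(16, 16)
    return (0.5 * w * per_pos.unsqueeze(0)).mean(dim=0).reshape(4, 4)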
| senyang-ml/PoseNFS | MSELoss | false | 16,389 | [
"MIT"
] | 53 | 1229abb69917dab1e57def3de0e3fe9a8a3164cd | https://github.com/senyang-ml/PoseNFS/tree/1229abb69917dab1e57def3de0e3fe9a8a3164cd |
BilinearAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py
# Topologically Sorted Source Nodes: [final], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# final => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
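# Hedged gloss (not generated output): this "clone" materializes
# mat2.unsqueeze(1).transpose(2, 3) into contiguous memory -- note that x1 and
# x2 are swapped between the load offset and the linear store index -- so the
# subsequent bmm can consume it directly.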
# kernel path: runs/run_shard_0/inductor_cache/6f/c6fzxsthxjpbsz53dpcunjzrud7xu5rsvlujaybaii5bqgmtcuss.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %primals_4), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [intermediate], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_2, (16, 4, 4), (0, 4, 1), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 1, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [final], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_3, buf1, 256, grid=grid(256), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [final], Original ATen: [aten.bmm]
extern_kernels.bmm(buf0, reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2)
del buf0
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf3, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
return (buf3, reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BilinearAttention(nn.Module):
"""
Computes attention between two matrices using a bilinear attention function. This
function has a matrix of weights ``W`` and a bias ``b``, and the similarity between
the two matrices ``X`` and ``Y`` is computed as ``X W Y^T + b``.
    Input:
    - mat1: ``(batch_size, num_rows_1, mat1_dim)``
    - mat2: ``(batch_size, num_rows_2, mat2_dim)``
    Output:
    - ``(batch_size, num_rows_1, num_rows_2)``
"""
def __init__(self, mat1_dim: 'int', mat2_dim: 'int', use_input_biases:
'bool'=False) ->None:
super().__init__()
if use_input_biases:
mat1_dim += 1
mat2_dim += 1
self.weight = nn.Parameter(torch.zeros(1, mat1_dim, mat2_dim))
self.bias = nn.Parameter(torch.zeros(1))
self._use_input_biases = use_input_biases
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.weight)
self.bias.data.fill_(0)
def forward(self, mat1: 'torch.Tensor', mat2: 'torch.Tensor'
) ->torch.Tensor:
if self._use_input_biases:
bias1 = mat1.new_ones(mat1.size()[:-1] + (1,))
bias2 = mat2.new_ones(mat2.size()[:-1] + (1,))
mat1 = torch.cat([mat1, bias1], -1)
mat2 = torch.cat([mat2, bias2], -1)
intermediate = torch.matmul(mat1.unsqueeze(1), self.weight)
final = torch.matmul(intermediate, mat2.unsqueeze(1).transpose(2, 3))
return final.squeeze(1) + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'mat1_dim': 4, 'mat2_dim': 4}]
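# Illustrative check (not part of the repo; shapes are assumptions): for plain
# 3-D inputs the forward pass above should agree with the explicit bilinear
# form ``X W Y^T + b`` described in the docstring.
def _bilinear_reference_check():
    attn = BilinearAttention(mat1_dim=4, mat2_dim=4)
    x = torch.rand(2, 3, 4)  # (batch_size, num_rows_1, mat1_dim)
    y = torch.rand(2, 5, 4)  # (batch_size, num_rows_2, mat2_dim)
    out = attn(x, y)         # (batch_size, num_rows_1, num_rows_2)
    ref = x @ attn.weight.squeeze(0) @ y.transpose(1, 2) + attn.bias
    assert torch.allclose(out, ref, atol=1e-06)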
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_2, (16, 4, 4), (0, 4, 1), 0),
out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 1, 16, 4, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_3, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf0, reinterpret_tensor(buf1, (16, 4, 4), (16,
4, 1), 0), out=buf2)
del buf0
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_1[grid(256)](buf3, primals_4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
return buf3, reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_1, (16, 4, 4), (16, 1, 4), 0)
class BilinearAttentionNew(nn.Module):
"""
Computes attention between two matrices using a bilinear attention function. This
function has a matrix of weights ``W`` and a bias ``b``, and the similarity between
the two matrices ``X`` and ``Y`` is computed as ``X W Y^T + b``.
    Input:
    - mat1: ``(batch_size, num_rows_1, mat1_dim)``
    - mat2: ``(batch_size, num_rows_2, mat2_dim)``
    Output:
    - ``(batch_size, num_rows_1, num_rows_2)``
"""
def __init__(self, mat1_dim: 'int', mat2_dim: 'int', use_input_biases:
'bool'=False) ->None:
super().__init__()
if use_input_biases:
mat1_dim += 1
mat2_dim += 1
self.weight = nn.Parameter(torch.zeros(1, mat1_dim, mat2_dim))
self.bias = nn.Parameter(torch.zeros(1))
self._use_input_biases = use_input_biases
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.weight)
self.bias.data.fill_(0)
def forward(self, input_0, input_1):
primals_2 = self.weight
primals_4 = self.bias
primals_1 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| shabnam-b/crosslingual-nlp | BilinearAttention | false | 16,390 | [
"MIT"
] | 64 | ccd91baaea23004eab9c4d871910945ca3e61ab7 | https://github.com/shabnam-b/crosslingual-nlp/tree/ccd91baaea23004eab9c4d871910945ca3e61ab7 |
CRF | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/gr/cgrgn26uqtxgimenfvgz62m5dl5gx5oswcchih2g7tov6jmseilw.py
# Topologically Sorted Source Nodes: [crf_scores], Original ATen: [aten.add]
# Source node to ATen node mapping:
# crf_scores => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %unsqueeze_2), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [crf_scores], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf0, primals_3, primals_4, buf1, 256, grid=grid(256), stream=stream0)
del buf0
del primals_3
del primals_4
return (buf1, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.init
class CRF(nn.Module):
"""
Conditional Random Field.
"""
def __init__(self, hidden_dim, tagset_size):
"""
:param hidden_dim: size of word RNN/BLSTM's output
:param tagset_size: number of tags
"""
super(CRF, self).__init__()
self.tagset_size = tagset_size
self.emission = nn.Linear(hidden_dim, self.tagset_size)
self.transition = nn.Parameter(torch.Tensor(self.tagset_size, self.
tagset_size))
self.transition.data.zero_()
def forward(self, feats):
"""
Forward propagation.
:param feats: output of word RNN/BLSTM, a tensor of dimensions (batch_size, timesteps, hidden_dim)
:return: CRF scores, a tensor of dimensions (batch_size, timesteps, tagset_size, tagset_size)
"""
self.batch_size = feats.size(0)
self.timesteps = feats.size(1)
emission_scores = self.emission(feats)
emission_scores = emission_scores.unsqueeze(2).expand(self.
batch_size, self.timesteps, self.tagset_size, self.tagset_size)
crf_scores = emission_scores + self.transition.unsqueeze(0).unsqueeze(0
)
return crf_scores
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_dim': 4, 'tagset_size': 4}]
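# Hedged usage sketch (shapes assumed from get_init_inputs): the broadcast in
# forward() gives crf_scores[b, t, i, j] = emission[b, t, j] + transition[i, j],
# i.e. an emission score for tag j plus a transition score from tag i to tag j
# (the i -> j reading is an assumption from the broadcast, not stated by the repo).
def _crf_shape_check():
    crf = CRF(hidden_dim=4, tagset_size=4)
    feats = torch.rand(2, 7, 4)          # (batch_size, timesteps, hidden_dim)
    scores = crf(feats)
    assert scores.shape == (2, 7, 4, 4)  # (batch, timesteps, tagset, tagset)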
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf0, primals_3, primals_4, buf1,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_3
del primals_4
return buf1, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0)
class CRFNew(nn.Module):
"""
Conditional Random Field.
"""
def __init__(self, hidden_dim, tagset_size):
"""
:param hidden_dim: size of word RNN/BLSTM's output
:param tagset_size: number of tags
"""
super(CRFNew, self).__init__()
self.tagset_size = tagset_size
self.emission = nn.Linear(hidden_dim, self.tagset_size)
self.transition = nn.Parameter(torch.Tensor(self.tagset_size, self.
tagset_size))
self.transition.data.zero_()
def forward(self, input_0):
primals_2 = self.transition
primals_4 = self.emission.weight
primals_3 = self.emission.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| sgrvinod/a-PyTorch-Tutorial-to-Sequence-Labeling | CRF | false | 16,391 | [
"MIT"
] | 334 | ee3f34b45a6e24dd748a144bfc25b1adf9e1f077 | https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Sequence-Labeling/tree/ee3f34b45a6e24dd748a144bfc25b1adf9e1f077 |
ShiftBias | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/cv/ccvi3ckdyrvch4kf3zzpqqwrhax36nayxpvpfea7ljbfv6u6hcjf.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 4), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 4.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class ShiftBias(nn.Module):
def __init__(self, bias):
super(ShiftBias, self).__init__()
self.bias = bias
def forward(self, x):
return x + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'bias': 4}]
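# Minimal usage sketch (values are assumptions): ShiftBias just adds a constant
# scalar offset to its input.
def _shift_bias_example():
    shift = ShiftBias(bias=4)
    x = torch.zeros(2, 3)
    assert torch.equal(shift(x), torch.full((2, 3), 4.0))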
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ShiftBiasNew(nn.Module):
def __init__(self, bias):
super(ShiftBiasNew, self).__init__()
self.bias = bias
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| shaun95/StarGANv2-VC | ShiftBias | false | 16,392 | [
"MIT"
] | 116 | ed20538971a03d699351a349a3631767333baeb7 | https://github.com/shaun95/StarGANv2-VC/tree/ed20538971a03d699351a349a3631767333baeb7 |
BabyUnet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/rv/crvxjxabnh2k3i3u5bskna5q423guwirdsncuzca3mliii57nbni.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 17424
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 66
x1 = (xindex // 66) % 66
x2 = (xindex // 4356)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x0))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-1) + x1))))) + (4096*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
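# Hedged gloss of the index arithmetic above (not generated output): for a
# 64x64 plane reflection-padded by 1 to 66x66, a padded coordinate p in [0, 66)
# maps back to source coordinate src = 63 - abs(abs(p - 1) - 63), e.g.
# p=0 -> 1, p=1 -> 0, p=65 -> 62, matching nn.ReflectionPad2d(1). The base
# offset 4095 = 63 + 64*63 is the last element of each plane, from which the
# negative row (-64 * ...) and column (-1 * ...) terms subtract.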
# kernel path: runs/run_shard_0/inductor_cache/ou/coud3mxcuxt5tt46lbpcehqytxiysa7ehg4q3hwg5crnt3z7rk5l.py
# Topologically Sorted Source Nodes: [x_1, x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => gt, mul, where
# x_3 => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 66
x1 = (xindex // 66) % 66
x4 = (xindex // 4356)
x2 = (xindex // 4356) % 16
x5 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x0))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-1) + x1))))) + (4096*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x5), tmp7, xmask)
''', device_str='cuda')
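# Hedged gloss: in_ptr0 holds the bias-free conv output, tmp2 adds the bias,
# and where(x > 0, x, 0.01 * x) is LeakyReLU with negative_slope=0.01 (the
# nn.LeakyReLU default), fused with re-padding the 64x64 activation to 66x66
# via the same reflection index pattern as above.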
# kernel path: runs/run_shard_0/inductor_cache/42/c42f4wzw6bn43zmeqhhe3kyiyobw5hxhgmabu4todkgydw4uxe72.py
# Topologically Sorted Source Nodes: [x_4, iadd, x_5], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd => add
# x_4 => convolution_1
# x_5 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_2, %primals_1), kwargs = {})
# %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor, %add, 3, 0, 9223372036854775807), kwargs = {})
# %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_1, %slice_scatter_default, 1, 0, 1), kwargs = {})
# %slice_scatter_default_2 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_12, 1, 0, 1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_2, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_2, 0.01), kwargs = {})
# %where_1 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %slice_scatter_default_2, %mul_1), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 16
x3 = xindex
x0 = xindex % 4096
x2 = (xindex // 65536)
tmp21 = tl.load(in_out_ptr0 + (x3), None)
tmp22 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_out_ptr0 + (x3), tmp3, other=0.0)
tmp5 = tl.load(in_ptr0 + (x1), tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr1 + (x0 + (4096*x2)), tmp3, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_out_ptr0 + (x3), tmp2, other=0.0)
tmp12 = tl.load(in_ptr0 + (x1), tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr1 + (x0 + (4096*x2)), tmp2, eviction_policy='evict_last', other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(in_out_ptr0 + (x3), tmp30, None)
''', device_str='cuda')
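# Hedged gloss of the slice_scatter chain above: channel 0 realizes the
# in-place skip connection x[:, :1] += input (conv + bias + primals_1), all
# other channels get plain conv + bias, and the whole buffer then passes
# through an in-place LeakyReLU(0.01).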
# kernel path: runs/run_shard_0/inductor_cache/zx/czxzdjkxda63ieps2a23pq7rypwh2ugcinnvqay7npz2a7xo77og.py
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.avg_pool2d, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_6 => avg_pool2d
# x_7 => _unsafe_index_4, _unsafe_index_5
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_1, [2, 2], [2, 2]), kwargs = {})
# %_unsafe_index_4 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%avg_pool2d, [None, None, %sub_9, None]), kwargs = {})
# %_unsafe_index_5 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_4, [None, None, None, %sub_9]), kwargs = {})
triton_poi_fused_avg_pool2d_reflection_pad2d_3 = async_compile.triton('triton_poi_fused_avg_pool2d_reflection_pad2d_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_reflection_pad2d_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_reflection_pad2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 73984
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = (xindex // 34) % 34
x2 = (xindex // 1156)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4030 + ((-128)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + ((-2)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4031 + ((-128)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + ((-2)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4094 + ((-128)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + ((-2)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4095 + ((-128)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + ((-2)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
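# Hedged gloss: the four loads gather the 2x2 window (2r, 2c)..(2r+1, 2c+1) of
# each 64x64 plane and the * 0.25 realizes avg_pool2d(kernel=2, stride=2); the
# abs-based indexing then reflection-pads the pooled 32x32 result to 34x34. In
# eager terms this step is roughly
#     F.pad(F.avg_pool2d(x, 2), (1, 1, 1, 1), mode='reflect')
# (an equivalence read from the graph fragment, not asserted by the repo).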
# kernel path: runs/run_shard_0/inductor_cache/ym/cymu2siein6tccpla7olwuus2kqc6kdtxbejtk762fcm2rvhyiql.py
# Topologically Sorted Source Nodes: [x_8, x_9, x_10], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_10 => _unsafe_index_6, _unsafe_index_7
# x_8 => convolution_2
# x_9 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_5, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.01), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
# %_unsafe_index_6 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_2, [None, None, %sub_9, None]), kwargs = {})
# %_unsafe_index_7 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_6, [None, None, None, %sub_9]), kwargs = {})
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_reflection_pad2d_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_reflection_pad2d_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = (xindex // 34) % 34
x4 = (xindex // 1156)
x2 = (xindex // 1156) % 32
x5 = xindex
tmp0 = tl.load(in_ptr0 + (1023 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + ((-32)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (1024*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x5), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hi/chiomcjctwfp4zynmktvcxw3qizq2dwglofxmyjeisiqpodj2nvz.py
# Topologically Sorted Source Nodes: [x_6, x_11, iadd_1, x_12], Original ATen: [aten.avg_pool2d, aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_1 => add_1
# x_11 => convolution_3
# x_12 => gt_3, mul_3, where_3
# x_6 => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_1, [2, 2], [2, 2]), kwargs = {})
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_7, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_39, %avg_pool2d), kwargs = {})
# %slice_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_1, %add_1, 3, 0, 9223372036854775807), kwargs = {})
# %slice_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_3, %slice_scatter_default_3, 1, 0, 16), kwargs = {})
# %slice_scatter_default_5 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_4, %slice_49, 1, 0, 16), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_5, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_5, 0.01), kwargs = {})
# %where_3 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %slice_scatter_default_5, %mul_3), kwargs = {})
triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_5 = async_compile.triton('triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 1024) % 32
x5 = xindex
x0 = xindex % 32
x3 = (xindex // 32768)
x6 = (xindex // 32) % 1024
tmp18 = tl.load(in_out_ptr0 + (x5), None)
tmp19 = tl.load(in_ptr0 + (x2), None, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 16, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_out_ptr0 + (x5), tmp2, other=0.0)
tmp4 = tl.load(in_ptr0 + (x2), tmp2, eviction_policy='evict_last', other=0.0)
tmp5 = tmp3 + tmp4
tmp6 = tl.load(in_ptr1 + ((2*x0) + (128*x6) + (65536*x3)), tmp2, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (1 + (2*x0) + (128*x6) + (65536*x3)), tmp2, eviction_policy='evict_last', other=0.0)
tmp8 = tmp7 + tmp6
tmp9 = tl.load(in_ptr1 + (64 + (2*x0) + (128*x6) + (65536*x3)), tmp2, eviction_policy='evict_last', other=0.0)
tmp10 = tmp9 + tmp8
tmp11 = tl.load(in_ptr1 + (65 + (2*x0) + (128*x6) + (65536*x3)), tmp2, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp10
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp5 + tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp2, tmp15, tmp16)
tmp20 = tmp18 + tmp19
tmp21 = tl.where(tmp2, tmp17, tmp20)
tmp22 = tl.where(tmp2, tmp21, tmp21)
tmp23 = 0.0
tmp24 = tmp22 > tmp23
tmp25 = 0.01
tmp26 = tmp22 * tmp25
tmp27 = tl.where(tmp24, tmp22, tmp26)
tl.store(in_out_ptr0 + (x5), tmp27, None)
''', device_str='cuda')
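# Hedged gloss: the same residual pattern one level down -- only the first 16
# of 32 channels receive the skip tensor, here the 2x2-average-pooled where_1
# (four strided loads scaled by 0.25) added on top of conv + bias, before an
# in-place LeakyReLU(0.01).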
# kernel path: runs/run_shard_0/inductor_cache/py/cpyb6kasamnoqmeqoid4mdunurissqjbh7cpra62xp5au3l6ep4j.py
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.arange]
# Source node to ATen node mapping:
# x_20 => iota_12
# Graph fragment:
# %iota_12 : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
triton_poi_fused_arange_6 = async_compile.triton('triton_poi_fused_arange_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_6(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/td/ctdrp5ezackwofdeuxqvqz32izlgnkvqm7rnuz2nzwh4uzhxiowl.py
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_20 => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_7 = async_compile.triton('triton_poi_fused__to_copy_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_7(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fc/cfcabq2vqzy3iz7bxnnbm7sq7lldaoyihnc4gimded2qojurbycz.py
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_20 => add_4, clamp_max
# Graph fragment:
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_4, 15), kwargs = {})
triton_poi_fused_add_clamp_8 = async_compile.triton('triton_poi_fused_add_clamp_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_8(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 15, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2q/c2q5w2pc2czu7x3qqpsah6ii7gqp6klrdlbtgd6yswh3ud6gn3ec.py
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_20 => add_3, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, mul_6, sub_24, sub_26
# Graph fragment:
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_12, torch.float32), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 0.5), kwargs = {})
# %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_6, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_24, 0.0), kwargs = {})
# %sub_26 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_26, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_clamp_mul_sub_9 = async_compile.triton('triton_poi_fused__to_copy_add_clamp_mul_sub_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_clamp_mul_sub_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_clamp_mul_sub_9(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
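# --- Illustrative sketch: the source-coordinate arithmetic shared by the three
# small kernels above (floor index, +1 neighbour clamped to the border, and the
# fractional blend weight) for an align_corners=False bilinear upsample.
# Kernels 13-16 below repeat the same arithmetic for the 32 -> 64 stage.
def _sketch_bilinear_indices(out_size=32, in_size=16):
    import torch
    dst = torch.arange(out_size, dtype=torch.float32)
    src = ((dst + 0.5) * (in_size / out_size) - 0.5).clamp(min=0.0)
    lo = src.to(torch.int64)                           # triton_poi_fused__to_copy_7
    hi = (lo + 1).clamp(max=in_size - 1)               # triton_poi_fused_add_clamp_8
    w = (src - lo.to(torch.float32)).clamp(0.0, 1.0)   # ..._add_clamp_mul_sub_9
    return lo, hi, w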
# kernel path: runs/run_shard_0/inductor_cache/vu/cvuxfktapawpqedowl27msedtzbcjv6brvygxkddei3yw5eh2glk.py
# Topologically Sorted Source Nodes: [x_13, x_20], Original ATen: [aten.avg_pool2d, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# x_13 => avg_pool2d_1
# x_20 => _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, add_7, add_8, mul_8, mul_9, sub_27, sub_28, sub_30
# Graph fragment:
# %avg_pool2d_1 : [num_users=4] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_3, [2, 2], [2, 2]), kwargs = {})
# %_unsafe_index_12 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%avg_pool2d_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%avg_pool2d_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_14 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%avg_pool2d_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%avg_pool2d_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_13, %_unsafe_index_12), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %clamp_max_2), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_12, %mul_8), kwargs = {})
# %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_15, %_unsafe_index_14), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_28, %clamp_max_2), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_14, %mul_9), kwargs = {})
# %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %add_7), kwargs = {})
triton_poi_fused__unsafe_index_add_avg_pool2d_mul_sub_10 = async_compile.triton('triton_poi_fused__unsafe_index_add_avg_pool2d_mul_sub_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: '*i64', 4: '*fp32', 5: '*i64', 6: '*i64', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_avg_pool2d_mul_sub_10', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_avg_pool2d_mul_sub_10(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 32  # output row
x0 = xindex % 32  # output column
x2 = (xindex // 1024)  # fused (batch, channel) index
x4 = xindex  # flat output offset
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + ((2*tmp8) + (64*tmp4) + (1024*x2)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1 + (2*tmp8) + (64*tmp4) + (1024*x2)), None, eviction_policy='evict_last')
tmp11 = tmp10 + tmp9
tmp12 = tl.load(in_ptr2 + (32 + (2*tmp8) + (64*tmp4) + (1024*x2)), None, eviction_policy='evict_last')
tmp13 = tmp12 + tmp11
tmp14 = tl.load(in_ptr2 + (33 + (2*tmp8) + (64*tmp4) + (1024*x2)), None, eviction_policy='evict_last')
tmp15 = tmp14 + tmp13
tmp16 = 0.25
tmp17 = tmp15 * tmp16
tmp19 = tmp18 + tmp1
tmp20 = tmp18 < 0
tmp21 = tl.where(tmp20, tmp19, tmp18)
tmp22 = tl.load(in_ptr2 + ((2*tmp8) + (64*tmp21) + (1024*x2)), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (1 + (2*tmp8) + (64*tmp21) + (1024*x2)), None, eviction_policy='evict_last')
tmp24 = tmp23 + tmp22
tmp25 = tl.load(in_ptr2 + (32 + (2*tmp8) + (64*tmp21) + (1024*x2)), None, eviction_policy='evict_last')
tmp26 = tmp25 + tmp24
tmp27 = tl.load(in_ptr2 + (33 + (2*tmp8) + (64*tmp21) + (1024*x2)), None, eviction_policy='evict_last')
tmp28 = tmp27 + tmp26
tmp29 = tmp28 * tmp16
tmp31 = tmp30 + tmp1
tmp32 = tmp30 < 0
tmp33 = tl.where(tmp32, tmp31, tmp30)
tmp34 = tl.load(in_ptr2 + ((2*tmp33) + (64*tmp21) + (1024*x2)), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr2 + (1 + (2*tmp33) + (64*tmp21) + (1024*x2)), None, eviction_policy='evict_last')
tmp36 = tmp35 + tmp34
tmp37 = tl.load(in_ptr2 + (32 + (2*tmp33) + (64*tmp21) + (1024*x2)), None, eviction_policy='evict_last')
tmp38 = tmp37 + tmp36
tmp39 = tl.load(in_ptr2 + (33 + (2*tmp33) + (64*tmp21) + (1024*x2)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp38
tmp41 = tmp40 * tmp16
tmp42 = tmp41 - tmp29
tmp44 = tmp42 * tmp43
tmp45 = tmp29 + tmp44
tmp46 = tl.load(in_ptr2 + ((2*tmp33) + (64*tmp4) + (1024*x2)), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr2 + (1 + (2*tmp33) + (64*tmp4) + (1024*x2)), None, eviction_policy='evict_last')
tmp48 = tmp47 + tmp46
tmp49 = tl.load(in_ptr2 + (32 + (2*tmp33) + (64*tmp4) + (1024*x2)), None, eviction_policy='evict_last')
tmp50 = tmp49 + tmp48
tmp51 = tl.load(in_ptr2 + (33 + (2*tmp33) + (64*tmp4) + (1024*x2)), None, eviction_policy='evict_last')
tmp52 = tmp51 + tmp50
tmp53 = tmp52 * tmp16
tmp54 = tmp53 - tmp17
tmp55 = tmp54 * tmp43
tmp56 = tmp17 + tmp55
tmp57 = tmp56 - tmp45
tl.store(in_out_ptr0 + (x4), tmp45, None)
tl.store(in_out_ptr1 + (x4), tmp57, None)
''', device_str='cuda')
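# --- Illustrative sketch: what the gather-heavy kernel above computes, assuming
# `x` is the (N, 32, 32, 32) activation feeding avg_pool2d. The kernel emits the
# two per-row horizontal interpolations and their difference; the vertical
# `base + diff * row_weight` combine is fused into the downstream cat/residual
# kernels.
def _sketch_avgpool_then_bilinear(x):
    import torch.nn.functional as F
    pooled = F.avg_pool2d(x, 2, 2)  # 32x32 -> 16x16
    return F.interpolate(pooled, scale_factor=2, mode='bilinear', align_corners=False)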
# kernel path: runs/run_shard_0/inductor_cache/aj/cajrwo2ocb7g7beyrotyiorx2c75nu4zqmhxllbnz6uua7c3gcib.py
# Topologically Sorted Source Nodes: [x_21, x_22], Original ATen: [aten.cat, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_21 => cat
# x_22 => _unsafe_index_16, _unsafe_index_17
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%add_9, %where_3], 1), kwargs = {})
# %_unsafe_index_16 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat, [None, None, %sub_9, None]), kwargs = {})
# %_unsafe_index_17 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_16, [None, None, None, %sub_9]), kwargs = {})
triton_poi_fused_cat_reflection_pad2d_11 = async_compile.triton('triton_poi_fused_cat_reflection_pad2d_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_reflection_pad2d_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_reflection_pad2d_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 1156) % 64
x0 = xindex % 34
x1 = (xindex // 34) % 34
x3 = (xindex // 73984)
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (1023 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + ((-32)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (1024*x2) + (32768*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (1023 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + ((-32)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (1024*x2) + (32768*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr2 + (31 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1)))))), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 64, tl.int64)
tmp14 = tmp0 < tmp13  # dead: the final select below keys on tmp4 only
tmp15 = tl.load(in_ptr3 + (1023 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + ((-32)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (1024*((-32) + x2)) + (32768*x3)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + (x5), tmp16, xmask)
''', device_str='cuda')
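# --- Illustrative sketch of the cat + 1-pixel reflection pad fused above; `a`
# stands for the finished bilinear upsample (the kernel fuses its final
# `base + diff * row_weight` combine in for the first 32 channels) and `b` for
# the skip activation, both (N, 32, 32, 32). Names are assumptions.
def _sketch_cat_reflection_pad(a, b):
    import torch
    import torch.nn.functional as F
    return F.pad(torch.cat([a, b], dim=1), (1, 1, 1, 1), mode='reflect')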
# kernel path: runs/run_shard_0/inductor_cache/zv/czv5yfgeudn3p5axnn6atcnxq4xfjccfzuo3ekpa3nffxiuj7pok.py
# Topologically Sorted Source Nodes: [x_26, iadd_3, x_27], Original ATen: [aten.convolution, aten.add, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# iadd_3 => add_10
# x_26 => convolution_7
# x_27 => gt_7, mul_12, where_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_19, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_10 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_7, %slice_107), kwargs = {})
# %gt_7 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_10, 0), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_10, 0.01), kwargs = {})
# %where_7 : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %add_10, %mul_12), kwargs = {})
# %gt_12 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_7, 0), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_12 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x2 = (xindex // 1024) % 32
x3 = (xindex // 32768)
x5 = xindex % 1024
x1 = (xindex // 32) % 32
tmp0 = tl.load(in_out_ptr0 + (x4), None)
tmp1 = tl.load(in_ptr0 + (x2), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = x2
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = tl.full([1], 32, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tl.load(in_ptr1 + (x5 + (1024*x2) + (32768*x3)), tmp7, other=0.0)
tmp9 = tl.load(in_ptr2 + (x5 + (1024*x2) + (32768*x3)), tmp7, other=0.0)
tmp10 = tl.load(in_ptr3 + (x1), tmp7, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 * tmp10
tmp12 = tmp8 + tmp11
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp7, tmp12, tmp13)
tmp15 = tmp3 >= tmp6
tmp16 = tl.full([1], 64, tl.int64)
tmp17 = tmp3 < tmp16
tmp18 = tl.load(in_ptr4 + (x5 + (1024*((-32) + x2)) + (32768*x3)), tmp15, other=0.0)
tmp19 = tl.where(tmp7, tmp14, tmp18)
tmp20 = tmp2 + tmp19
tmp21 = 0.0
tmp22 = tmp20 > tmp21
tmp23 = 0.01
tmp24 = tmp20 * tmp23
tmp25 = tl.where(tmp22, tmp20, tmp24)
tmp26 = tmp25 > tmp21
tl.store(in_out_ptr0 + (x4), tmp20, None)
tl.store(out_ptr0 + (x4), tmp26, None)
''', device_str='cuda')
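# --- Illustrative sketch: the kernel above stores the pre-activation residual
# sum in place and only the (activation > 0) mask for the leaky_relu backward;
# downstream kernels re-apply the activation when they gather. `y` is the conv
# output, `skip` the concatenated tensor slice selected by the `x2` predicate
# (whose first 32 channels also finish the `base + diff * row_weight` combine
# inline). Names are assumptions.
def _sketch_residual_preact_and_mask(y, bias, skip):
    import torch.nn.functional as F
    z = y + bias.view(1, -1, 1, 1) + skip               # written back to in_out_ptr0
    mask = F.leaky_relu(z, negative_slope=0.01) > 0     # written to out_ptr0
    return z, mask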
# kernel path: runs/run_shard_0/inductor_cache/op/coppgslneiay6uepaw7mnyzu2ecyywgg6qnnmsakzwfm4lga4lyn.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.arange]
# Source node to ATen node mapping:
# x_28 => iota_18
# Graph fragment:
# %iota_18 : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
triton_poi_fused_arange_13 = async_compile.triton('triton_poi_fused_arange_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_13(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/65/c65inofqcew3p3jufahbpodugxpqwl7kptzkuw5m2wzzolofmbzu.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_28 => convert_element_type_5
# Graph fragment:
# %convert_element_type_5 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {})
triton_poi_fused__to_copy_14 = async_compile.triton('triton_poi_fused__to_copy_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_14(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5p/c5pvjuy43sbz42gq7ordank2dnieh6ki4badokc3fz77mixwt544.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_28 => add_12, clamp_max_4
# Graph fragment:
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {})
# %clamp_max_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_12, 31), kwargs = {})
triton_poi_fused_add_clamp_15 = async_compile.triton('triton_poi_fused_add_clamp_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_15(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 31, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/z2/cz2n7s7eoycgi3cgx4ggoegquhzxfnvx33jpcc2b43avdulr2egk.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_28 => add_11, clamp_max_6, clamp_min_4, clamp_min_6, convert_element_type_4, mul_13, sub_39, sub_41
# Graph fragment:
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_18, torch.float32), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.5), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_11, 0.5), kwargs = {})
# %sub_39 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_13, 0.5), kwargs = {})
# %clamp_min_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_39, 0.0), kwargs = {})
# %sub_41 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_4, %convert_element_type_7), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_41, 0.0), kwargs = {})
# %clamp_max_6 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_clamp_mul_sub_16 = async_compile.triton('triton_poi_fused__to_copy_add_clamp_mul_sub_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_clamp_mul_sub_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_clamp_mul_sub_16(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ch/cchmph6nmawzap7etsr2cslvq4mm3xqr2oq4jmuepxgog4ay2vmx.py
# Topologically Sorted Source Nodes: [x_27, x_28], Original ATen: [aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# x_27 => gt_7, mul_12, where_7
# x_28 => _unsafe_index_20, _unsafe_index_21, add_15, mul_15, sub_42
# Graph fragment:
# %gt_7 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_10, 0), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_10, 0.01), kwargs = {})
# %where_7 : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %add_10, %mul_12), kwargs = {})
# %_unsafe_index_20 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_7, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_21 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_7, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {})
# %sub_42 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_21, %_unsafe_index_20), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_42, %clamp_max_6), kwargs = {})
# %add_15 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_20, %mul_15), kwargs = {})
triton_poi_fused__unsafe_index_add_leaky_relu_mul_sub_17 = async_compile.triton('triton_poi_fused__unsafe_index_add_leaky_relu_mul_sub_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*i64', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_leaky_relu_mul_sub_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_leaky_relu_mul_sub_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x2 = (xindex // 4096)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (32*tmp4) + (1024*x2)), None, eviction_policy='evict_last')
tmp10 = 0.0
tmp11 = tmp9 > tmp10
tmp12 = 0.01
tmp13 = tmp9 * tmp12
tmp14 = tl.where(tmp11, tmp9, tmp13)
tmp16 = tmp15 + tmp1
tmp17 = tmp15 < 0
tmp18 = tl.where(tmp17, tmp16, tmp15)
tmp19 = tl.load(in_ptr2 + (tmp18 + (32*tmp4) + (1024*x2)), None, eviction_policy='evict_last')
tmp20 = tmp19 > tmp10
tmp21 = tmp19 * tmp12
tmp22 = tl.where(tmp20, tmp19, tmp21)
tmp23 = tmp22 - tmp14
tmp25 = tmp23 * tmp24
tmp26 = tmp14 + tmp25
tl.store(out_ptr0 + (x4), tmp26, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jr/cjrdhr5vczebwsxptggru3pet36lp56ln42slzq4osnsyiqlebxa.py
# Topologically Sorted Source Nodes: [x_29], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_29 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%add_17, %where_1], 1), kwargs = {})
triton_poi_fused_cat_18 = async_compile.triton('triton_poi_fused_cat_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 4096) % 48
x3 = (xindex // 196608)
x4 = xindex % 4096
x1 = (xindex // 64) % 64
x0 = xindex % 64
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + (4096*x2) + (131072*x3)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([XBLOCK], 32, tl.int32)
tmp8 = tmp6 + tmp7
tmp9 = tmp6 < 0
tmp10 = tl.where(tmp9, tmp8, tmp6)
tmp11 = tl.load(in_ptr2 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = tmp11 < 0
tmp14 = tl.where(tmp13, tmp12, tmp11)
tmp15 = tl.load(in_ptr3 + (tmp14 + (32*tmp10) + (1024*x2) + (32768*x3)), tmp4, eviction_policy='evict_last', other=0.0)
tmp16 = 0.0
tmp17 = tmp15 > tmp16
tmp18 = 0.01
tmp19 = tmp15 * tmp18
tmp20 = tl.where(tmp17, tmp15, tmp19)
tmp21 = tl.load(in_ptr4 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp22 = tmp21 + tmp7
tmp23 = tmp21 < 0
tmp24 = tl.where(tmp23, tmp22, tmp21)
tmp25 = tl.load(in_ptr3 + (tmp24 + (32*tmp10) + (1024*x2) + (32768*x3)), tmp4, eviction_policy='evict_last', other=0.0)
tmp26 = tmp25 > tmp16
tmp27 = tmp25 * tmp18
tmp28 = tl.where(tmp26, tmp25, tmp27)
tmp29 = tmp28 - tmp20
tmp30 = tl.load(in_ptr5 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp31 = tmp29 * tmp30
tmp32 = tmp20 + tmp31
tmp33 = tmp32 - tmp5
tmp34 = tl.load(in_ptr6 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp35 = tmp33 * tmp34
tmp36 = tmp5 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp4, tmp36, tmp37)
tmp39 = tmp0 >= tmp3
tmp40 = tl.full([1], 48, tl.int64)
tmp41 = tmp0 < tmp40
tmp42 = tl.load(in_ptr7 + (x4 + (4096*((-32) + x2)) + (65536*x3)), tmp39, other=0.0)
tmp43 = tl.where(tmp4, tmp38, tmp42)
tl.store(out_ptr0 + (x5), tmp43, None)
''', device_str='cuda')
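# --- Illustrative sketch of the separable bilinear blend that kernels 17 and 18
# split between them: two column lerps (one per source row), then one row lerp.
# v00/v01/v10/v11 are the four gathered (and leaky_relu'd) corner values, wx/wy
# the fractional weights from the index kernels. Names are assumptions.
def _sketch_bilinear_combine(v00, v01, v10, v11, wx, wy):
    top = v00 + (v01 - v00) * wx   # row `lo` (kernel 17)
    bot = v10 + (v11 - v10) * wx   # row `hi` (kernel 18)
    return top + (bot - top) * wy  # vertical combine, fused with the cat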
# kernel path: runs/run_shard_0/inductor_cache/5e/c5epkxuqcyqhybbd7uerkumkhz2oldheytu6smleihsfca3bdgfm.py
# Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_30 => _unsafe_index_24, _unsafe_index_25
# Graph fragment:
# %_unsafe_index_24 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_25 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_24, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_19 = async_compile.triton('triton_poi_fused_reflection_pad2d_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_19(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 836352
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 66
x1 = (xindex // 66) % 66
x2 = (xindex // 4356)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x0))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-1) + x1))))) + (4096*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
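# --- Illustrative sketch: the nested tl_math.abs() expression above is a
# mirror fold. For a 1-pixel reflection pad of a `size`-wide axis it maps a
# padded coordinate x in [0, size + 1] back to a source index in [0, size - 1];
# e.g. x = 0 -> 1 and x = size + 1 -> size - 2.
def _sketch_reflect_index(x, size=64):
    return (size - 1) - abs((size - 1) - abs(x - 1))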
# kernel path: runs/run_shard_0/inductor_cache/2x/c2xyvxy4dlxxxs7grcilk2p2nhcolq5w6ep2kl6up5amlhp4dcn3.py
# Topologically Sorted Source Nodes: [x_34, iadd_4, x_35], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_4 => add_18
# x_34 => convolution_9
# x_35 => gt_9, mul_19, where_9
# Graph fragment:
# %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_27, %primals_20, %primals_21, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_18 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_9, %slice_136), kwargs = {})
# %gt_9 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_18, 0), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_18, 0.01), kwargs = {})
# %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %add_18, %mul_19), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_20 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_20', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_20(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 16
x2 = (xindex // 65536)
x4 = xindex % 65536
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x4 + (196608*x2)), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(in_out_ptr0 + (x3), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qz/cqz4jj5cdhpzmf5i34l4ip2tryju5qinbsnirbvafkdea243px5y.py
# Topologically Sorted Source Nodes: [x_36], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_36 => convolution_10
# Graph fragment:
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_9, %primals_22, %primals_23, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_21 = async_compile.triton('triton_poi_fused_convolution_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_21', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fe/cfesq3jlbhy3hzv3kthghm5lbmmlvjzz2himhtovql3rnxshuu2w.py
# Topologically Sorted Source Nodes: [x_31, x_32], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# x_31 => convolution_8
# x_32 => gt_8, mul_18, where_8
# Graph fragment:
# %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_25, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_8 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_8, 0), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_8, 0.01), kwargs = {})
# %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %convolution_8, %mul_18), kwargs = {})
# %gt_11 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_8, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_22 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 16
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, None)
''', device_str='cuda')
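# --- Illustrative sketch: why the boolean mask is all this kernel needs to
# save. Given the (output > 0) mask, the leaky_relu gradient is recovered as
# grad_out where the unit was active and slope * grad_out elsewhere. Kernel 23
# below produces the same kind of mask at the 32-channel stage.
def _sketch_leaky_relu_backward(grad_out, mask, slope=0.01):
    import torch
    return torch.where(mask, grad_out, grad_out * slope)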
# kernel path: runs/run_shard_0/inductor_cache/f7/cf7dolczljdur2sm7lb4q4kbgvgsrqjhwtuydwjnp7py6cnwawlw.py
# Topologically Sorted Source Nodes: [x_23, x_24], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# x_23 => convolution_6
# x_24 => gt_6, mul_11, where_6
# Graph fragment:
# %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_17, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.01), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_6, %mul_11), kwargs = {})
# %gt_13 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_6, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_23 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_23(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, None)
''', device_str='cuda')
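# Reference sketch (illustrative, not part of the generated graph): the two
# fused kernels above compute, in one pass, the bias add, LeakyReLU(0.01),
# and the boolean mask that leaky_relu_backward consumes. `y` and `bias` are
# placeholder names for a conv output and its per-channel bias.
def _leaky_relu_mask_reference(y, bias):
    z = y + bias.view(1, -1, 1, 1)          # tmp2 = tmp0 + tmp1
    act = torch.where(z > 0, z, 0.01 * z)   # tmp7: LeakyReLU, slope 0.01
    return act > 0                          # tmp8: mask stored to out_ptr0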
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (32, ), (1, ))
assert_size_stride(primals_14, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (32, ), (1, ))
assert_size_stride(primals_16, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_17, (32, ), (1, ))
assert_size_stride(primals_18, (16, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_19, (16, ), (1, ))
assert_size_stride(primals_20, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_21, (16, ), (1, ))
assert_size_stride(primals_22, (1, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_23, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 66, 66), (4356, 4356, 66, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 17424, grid=grid(17424), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf2 = empty_strided_cuda((4, 16, 66, 66), (69696, 4356, 66, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d]
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1.run(buf1, primals_3, buf2, 278784, grid=grid(278784), stream=stream0)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_4, iadd, x_5], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_2.run(buf4, primals_5, primals_1, 262144, grid=grid(262144), stream=stream0)
del primals_1
del primals_5
buf5 = empty_strided_cuda((4, 16, 34, 34), (18496, 1156, 34, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.avg_pool2d, aten.reflection_pad2d]
triton_poi_fused_avg_pool2d_reflection_pad2d_3.run(buf4, buf5, 73984, grid=grid(73984), stream=stream0)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf7 = empty_strided_cuda((4, 32, 34, 34), (36992, 1156, 34, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8, x_9, x_10], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d]
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_4.run(buf6, primals_7, buf7, 147968, grid=grid(147968), stream=stream0)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf9 = buf8; del buf8 # reuse
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [x_6, x_11, iadd_1, x_12], Original ATen: [aten.avg_pool2d, aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_5.run(buf10, primals_9, buf4, 131072, grid=grid(131072), stream=stream0)
del primals_9
buf11 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.arange]
triton_poi_fused_arange_6.run(buf11, 32, grid=grid(32), stream=stream0)
buf12 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_7.run(buf12, 32, grid=grid(32), stream=stream0)
buf13 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_8.run(buf13, 32, grid=grid(32), stream=stream0)
buf14 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_7.run(buf14, 32, grid=grid(32), stream=stream0)
buf15 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_8.run(buf15, 32, grid=grid(32), stream=stream0)
buf18 = empty_strided_cuda((32, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_clamp_mul_sub_9.run(buf18, 32, grid=grid(32), stream=stream0)
buf17 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
buf16 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
buf19 = buf16; del buf16 # reuse
buf21 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [x_13, x_20], Original ATen: [aten.avg_pool2d, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_avg_pool2d_mul_sub_10.run(buf19, buf21, buf13, buf14, buf10, buf12, buf15, buf18, 131072, grid=grid(131072), stream=stream0)
buf20 = empty_strided_cuda((32, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_clamp_mul_sub_9.run(buf20, 32, grid=grid(32), stream=stream0)
buf22 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_21, x_22], Original ATen: [aten.cat, aten.reflection_pad2d]
triton_poi_fused_cat_reflection_pad2d_11.run(buf19, buf21, buf20, buf10, buf22, 295936, grid=grid(295936), stream=stream0)
# Topologically Sorted Source Nodes: [x_23], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf24 = empty_strided_cuda((4, 32, 34, 34), (36992, 1156, 34, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_23, x_24, x_25], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d]
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_4.run(buf23, primals_15, buf24, 147968, grid=grid(147968), stream=stream0)
# Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf24, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf26 = buf25; del buf25 # reuse
buf44 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_26, iadd_3, x_27], Original ATen: [aten.convolution, aten.add, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_12.run(buf26, primals_17, buf19, buf21, buf20, buf10, buf44, 131072, grid=grid(131072), stream=stream0)
del buf19
del buf21
del primals_17
buf27 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.arange]
triton_poi_fused_arange_13.run(buf27, 64, grid=grid(64), stream=stream0)
buf28 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_14.run(buf28, 64, grid=grid(64), stream=stream0)
buf29 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_15.run(buf29, 64, grid=grid(64), stream=stream0)
buf30 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_14.run(buf30, 64, grid=grid(64), stream=stream0)
buf31 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_15.run(buf31, 64, grid=grid(64), stream=stream0)
buf32 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_clamp_mul_sub_16.run(buf32, 64, grid=grid(64), stream=stream0)
buf33 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_27, x_28], Original ATen: [aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_leaky_relu_mul_sub_17.run(buf28, buf30, buf26, buf31, buf32, buf33, 524288, grid=grid(524288), stream=stream0)
buf34 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_clamp_mul_sub_16.run(buf34, 64, grid=grid(64), stream=stream0)
buf35 = empty_strided_cuda((4, 48, 64, 64), (196608, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_29], Original ATen: [aten.cat]
triton_poi_fused_cat_18.run(buf33, buf29, buf30, buf26, buf31, buf32, buf34, buf4, buf35, 786432, grid=grid(786432), stream=stream0)
del buf26
del buf33
buf36 = empty_strided_cuda((4, 48, 66, 66), (209088, 4356, 66, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_19.run(buf35, buf36, 836352, grid=grid(836352), stream=stream0)
# Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.convolution]
buf37 = extern_kernels.convolution(buf36, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf38 = empty_strided_cuda((4, 16, 66, 66), (69696, 4356, 66, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_31, x_32, x_33], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d]
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1.run(buf37, primals_19, buf38, 278784, grid=grid(278784), stream=stream0)
# Topologically Sorted Source Nodes: [x_34], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf38, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf40 = buf39; del buf39 # reuse
# Topologically Sorted Source Nodes: [x_34, iadd_4, x_35], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_20.run(buf40, primals_21, buf35, 262144, grid=grid(262144), stream=stream0)
del buf35
del primals_21
# Topologically Sorted Source Nodes: [x_36], Original ATen: [aten.convolution]
buf41 = extern_kernels.convolution(buf40, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf42 = buf41; del buf41 # reuse
# Topologically Sorted Source Nodes: [x_36], Original ATen: [aten.convolution]
triton_poi_fused_convolution_21.run(buf42, primals_23, 16384, grid=grid(16384), stream=stream0)
del primals_23
buf43 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_31, x_32], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_22.run(buf37, primals_19, buf43, 262144, grid=grid(262144), stream=stream0)
del buf37
del primals_19
buf45 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_23, x_24], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_23.run(buf23, primals_15, buf45, 131072, grid=grid(131072), stream=stream0)
del buf23
del primals_15
buf46 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_23.run(buf6, primals_7, buf46, 131072, grid=grid(131072), stream=stream0)
del buf6
del primals_7
buf47 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_22.run(buf1, primals_3, buf47, 262144, grid=grid(262144), stream=stream0)
del buf1
del primals_3
return (buf42, primals_2, primals_4, primals_6, primals_8, primals_14, primals_16, primals_18, primals_20, primals_22, buf0, buf2, buf4, buf5, buf7, buf10, buf11, buf12, buf13, buf14, buf15, buf18, buf20, buf22, buf24, buf27, buf28, buf29, buf30, buf31, buf32, buf34, buf36, buf38, buf40, buf43, buf44, buf45, buf46, buf47, )
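# Note: the first element of the returned tuple (buf42) is the network
# output; the remaining tensors are inputs, activations, and LeakyReLU masks
# retained for the backward pass.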
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((16, 48, 3, 3), (432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((1, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, dropout=False, norm=None,
residual=True, activation='leakyrelu', in_place_activation=True,
transpose=False, reflectpad=True):
super(ConvBlock, self).__init__()
self.dropout = dropout
self.residual = residual
self.activation = activation
self.transpose = transpose
self.reflectpad = reflectpad
if self.dropout:
self.dropout1 = nn.Dropout2d(p=0.05)
self.dropout2 = nn.Dropout2d(p=0.05)
self.norm1 = None
self.norm2 = None
if norm is not None:
if norm == 'batch':
self.norm1 = nn.BatchNorm2d(out_channels)
self.norm2 = nn.BatchNorm2d(out_channels)
elif norm == 'instance':
self.norm1 = nn.InstanceNorm2d(out_channels, affine=True)
self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
if self.transpose:
self.conv1 = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size=3, padding=0 if self.reflectpad else 1)
self.conv2 = nn.ConvTranspose2d(out_channels, out_channels,
kernel_size=3, padding=0 if self.reflectpad else 1)
else:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
padding=0 if self.reflectpad else 1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=
3, padding=0 if self.reflectpad else 1)
if self.activation == 'relu':
self.actfun1 = nn.ReLU(inplace=in_place_activation)
self.actfun2 = nn.ReLU(inplace=in_place_activation)
elif self.activation == 'leakyrelu':
self.actfun1 = nn.LeakyReLU(inplace=in_place_activation)
self.actfun2 = nn.LeakyReLU(inplace=in_place_activation)
elif self.activation == 'elu':
self.actfun1 = nn.ELU(inplace=in_place_activation)
self.actfun2 = nn.ELU(inplace=in_place_activation)
elif self.activation == 'selu':
self.actfun1 = nn.SELU(inplace=in_place_activation)
self.actfun2 = nn.SELU(inplace=in_place_activation)
if self.reflectpad:
self.rpad1 = nn.ReflectionPad2d(1)
self.rpad2 = nn.ReflectionPad2d(1)
def forward(self, x):
ox = x
if self.reflectpad:
x = self.rpad1(x)
x = self.conv1(x)
if self.dropout:
x = self.dropout1(x)
x = self.actfun1(x)
if self.norm1:
x = self.norm1(x)
if self.reflectpad:
x = self.rpad2(x)
x = self.conv2(x)
if self.dropout:
x = self.dropout2(x)
if self.residual:
            nc = min(ox.shape[1], x.shape[1])
            x[:, :nc, :, :] += ox[:, :nc, :, :]
x = self.actfun2(x)
if self.norm2:
x = self.norm2(x)
return x
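# Minimal usage sketch (shapes are illustrative assumptions): because the
# residual add is truncated to min(in, out) channels, a 16->32 block adds
# the 16 input channels into the first 16 output channels only.
def _convblock_demo():
    block = ConvBlock(16, 32)
    out = block(torch.rand(1, 16, 64, 64))
    assert out.shape == (1, 32, 64, 64)
    return out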
class BabyUnet(nn.Module):
def __init__(self, n_channel_in=1, n_channel_out=1):
super(BabyUnet, self).__init__()
self.pool1 = nn.AvgPool2d(kernel_size=2)
self.pool2 = nn.AvgPool2d(kernel_size=2)
self.up1 = lambda x: F.interpolate(x, mode='bilinear', scale_factor=2)
self.up2 = lambda x: F.interpolate(x, mode='bilinear', scale_factor=2)
self.conv1 = ConvBlock(n_channel_in, 16)
self.conv2 = ConvBlock(16, 32)
self.conv3 = ConvBlock(32, 32)
self.conv4 = ConvBlock(64, 32)
self.conv5 = ConvBlock(48, 16)
self.conv6 = nn.Conv2d(16, n_channel_out, 1)
def forward(self, x):
c1 = self.conv1(x)
x = self.pool1(c1)
c2 = self.conv2(x)
x = self.pool2(c2)
        self.conv3(x)  # note: the result is discarded (never assigned), so conv3 has no effect; the compiled call accordingly never reads primals_10..13
x = self.up1(x)
x = torch.cat([x, c2], 1)
x = self.conv4(x)
x = self.up2(x)
x = torch.cat([x, c1], 1)
x = self.conv5(x)
x = self.conv6(x)
return x
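# Sanity-check sketch (input shape taken from get_inputs below; the helper
# name is illustrative): BabyUnet maps (N, 1, 64, 64) back to (N, 1, 64, 64).
def _babyunet_shape_check():
    net = BabyUnet()
    y = net(torch.rand(4, 1, 64, 64))
    assert y.shape == (4, 1, 64, 64)
    return y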
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 17424
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 66
x1 = xindex // 66 % 66
x2 = xindex // 4356
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 +
x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 66
x1 = xindex // 66 % 66
x4 = xindex // 4356
x2 = xindex // 4356 % 16
x5 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 +
x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x5, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_2(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 16
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 65536
tmp21 = tl.load(in_out_ptr0 + x3, None)
tmp22 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_out_ptr0 + x3, tmp3, other=0.0)
tmp5 = tl.load(in_ptr0 + x1, tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr1 + (x0 + 4096 * x2), tmp3, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_out_ptr0 + x3, tmp2, other=0.0)
tmp12 = tl.load(in_ptr0 + x1, tmp2, eviction_policy='evict_last', other=0.0
)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr1 + (x0 + 4096 * x2), tmp2, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(in_out_ptr0 + x3, tmp30, None)
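# Note: in the kernel above, `tmp2 = tmp0 < tmp1` (x1 < 1) gates the residual
# add to channel 0 only: ConvBlock adds its single-channel input into
# min(in_channels, out_channels) = 1 of the 16 output channels.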
@triton.jit
def triton_poi_fused_avg_pool2d_reflection_pad2d_3(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 73984
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = xindex // 34 % 34
x2 = xindex // 1156
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4030 + -128 * tl_math.abs(-31 + tl_math.abs(-
1 + x1)) + -2 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + 4096 * x2
), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4031 + -128 * tl_math.abs(-31 + tl_math.abs(-
1 + x1)) + -2 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + 4096 * x2
), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4094 + -128 * tl_math.abs(-31 + tl_math.abs(-
1 + x1)) + -2 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + 4096 * x2
), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4095 + -128 * tl_math.abs(-31 + tl_math.abs(-
1 + x1)) + -2 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + 4096 * x2
), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_4(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = xindex // 34 % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 32
x5 = xindex
tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x5, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_5(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 1024 % 32
x5 = xindex
x0 = xindex % 32
x3 = xindex // 32768
x6 = xindex // 32 % 1024
tmp18 = tl.load(in_out_ptr0 + x5, None)
tmp19 = tl.load(in_ptr0 + x2, None, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 16, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_out_ptr0 + x5, tmp2, other=0.0)
tmp4 = tl.load(in_ptr0 + x2, tmp2, eviction_policy='evict_last', other=0.0)
tmp5 = tmp3 + tmp4
tmp6 = tl.load(in_ptr1 + (2 * x0 + 128 * x6 + 65536 * x3), tmp2,
eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (1 + 2 * x0 + 128 * x6 + 65536 * x3), tmp2,
eviction_policy='evict_last', other=0.0)
tmp8 = tmp7 + tmp6
tmp9 = tl.load(in_ptr1 + (64 + 2 * x0 + 128 * x6 + 65536 * x3), tmp2,
eviction_policy='evict_last', other=0.0)
tmp10 = tmp9 + tmp8
tmp11 = tl.load(in_ptr1 + (65 + 2 * x0 + 128 * x6 + 65536 * x3), tmp2,
eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp10
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp5 + tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp2, tmp15, tmp16)
tmp20 = tmp18 + tmp19
tmp21 = tl.where(tmp2, tmp17, tmp20)
tmp22 = tl.where(tmp2, tmp21, tmp21)
tmp23 = 0.0
tmp24 = tmp22 > tmp23
tmp25 = 0.01
tmp26 = tmp22 * tmp25
tmp27 = tl.where(tmp24, tmp22, tmp26)
tl.store(in_out_ptr0 + x5, tmp27, None)
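# Note: `tmp22 = tl.where(tmp2, tmp21, tmp21)` above is a no-op select left
# over from tracing the channel-sliced residual: only the first 16 of the 32
# output channels receive the average-pooled skip tensor.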
@triton.jit
def triton_poi_fused_arange_6(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_7(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_8(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 15, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_clamp_mul_sub_9(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
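# Reference for the three index kernels above (illustrative helper; names
# assumed): with scale_factor=2 and align_corners=False the source
# coordinate is src = max((dst + 0.5) * 0.5 - 0.5, 0); the kernels emit
# floor(src), min(floor(src) + 1, in_size - 1), and the clamped fraction.
def _bilinear_index_reference(out_size=32, in_size=16):
    dst = torch.arange(out_size, dtype=torch.float32)
    src = ((dst + 0.5) * 0.5 - 0.5).clamp(min=0.0)
    lo = src.to(torch.int64)                 # triton_poi_fused__to_copy_7
    hi = (lo + 1).clamp(max=in_size - 1)     # triton_poi_fused_add_clamp_8
    frac = (src - lo.to(torch.float32)).clamp(0.0, 1.0)  # ..._mul_sub_9
    return lo, hi, frac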
@triton.jit
def triton_poi_fused__unsafe_index_add_avg_pool2d_mul_sub_10(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 32
x0 = xindex % 32
x2 = xindex // 1024
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (2 * tmp8 + 64 * tmp4 + 1024 * x2), None,
eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1 + 2 * tmp8 + 64 * tmp4 + 1024 * x2), None,
eviction_policy='evict_last')
tmp11 = tmp10 + tmp9
tmp12 = tl.load(in_ptr2 + (32 + 2 * tmp8 + 64 * tmp4 + 1024 * x2), None,
eviction_policy='evict_last')
tmp13 = tmp12 + tmp11
tmp14 = tl.load(in_ptr2 + (33 + 2 * tmp8 + 64 * tmp4 + 1024 * x2), None,
eviction_policy='evict_last')
tmp15 = tmp14 + tmp13
tmp16 = 0.25
tmp17 = tmp15 * tmp16
tmp19 = tmp18 + tmp1
tmp20 = tmp18 < 0
tmp21 = tl.where(tmp20, tmp19, tmp18)
tmp22 = tl.load(in_ptr2 + (2 * tmp8 + 64 * tmp21 + 1024 * x2), None,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (1 + 2 * tmp8 + 64 * tmp21 + 1024 * x2), None,
eviction_policy='evict_last')
tmp24 = tmp23 + tmp22
tmp25 = tl.load(in_ptr2 + (32 + 2 * tmp8 + 64 * tmp21 + 1024 * x2),
None, eviction_policy='evict_last')
tmp26 = tmp25 + tmp24
tmp27 = tl.load(in_ptr2 + (33 + 2 * tmp8 + 64 * tmp21 + 1024 * x2),
None, eviction_policy='evict_last')
tmp28 = tmp27 + tmp26
tmp29 = tmp28 * tmp16
tmp31 = tmp30 + tmp1
tmp32 = tmp30 < 0
tmp33 = tl.where(tmp32, tmp31, tmp30)
tmp34 = tl.load(in_ptr2 + (2 * tmp33 + 64 * tmp21 + 1024 * x2), None,
eviction_policy='evict_last')
tmp35 = tl.load(in_ptr2 + (1 + 2 * tmp33 + 64 * tmp21 + 1024 * x2),
None, eviction_policy='evict_last')
tmp36 = tmp35 + tmp34
tmp37 = tl.load(in_ptr2 + (32 + 2 * tmp33 + 64 * tmp21 + 1024 * x2),
None, eviction_policy='evict_last')
tmp38 = tmp37 + tmp36
tmp39 = tl.load(in_ptr2 + (33 + 2 * tmp33 + 64 * tmp21 + 1024 * x2),
None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp38
tmp41 = tmp40 * tmp16
tmp42 = tmp41 - tmp29
tmp44 = tmp42 * tmp43
tmp45 = tmp29 + tmp44
tmp46 = tl.load(in_ptr2 + (2 * tmp33 + 64 * tmp4 + 1024 * x2), None,
eviction_policy='evict_last')
tmp47 = tl.load(in_ptr2 + (1 + 2 * tmp33 + 64 * tmp4 + 1024 * x2), None,
eviction_policy='evict_last')
tmp48 = tmp47 + tmp46
tmp49 = tl.load(in_ptr2 + (32 + 2 * tmp33 + 64 * tmp4 + 1024 * x2),
None, eviction_policy='evict_last')
tmp50 = tmp49 + tmp48
tmp51 = tl.load(in_ptr2 + (33 + 2 * tmp33 + 64 * tmp4 + 1024 * x2),
None, eviction_policy='evict_last')
tmp52 = tmp51 + tmp50
tmp53 = tmp52 * tmp16
tmp54 = tmp53 - tmp17
tmp55 = tmp54 * tmp43
tmp56 = tmp17 + tmp55
tmp57 = tmp56 - tmp45
tl.store(in_out_ptr0 + x4, tmp45, None)
tl.store(in_out_ptr1 + x4, tmp57, None)
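# Eager-mode sketch of the fusion above (illustrative): kernel 10 combines
# the 2x average pool (x_13) with the horizontal lerp of the bilinear
# upsample (x_20), emitting the interpolated top-row value and the
# bottom-minus-top delta that kernels 11/12 finish off.
def _pool_then_upsample_reference(x):
    # x: (N, C, 32, 32) -> avg-pool to 16x16, then bilinear back to 32x32
    return F.interpolate(F.avg_pool2d(x, kernel_size=2),
                         scale_factor=2, mode='bilinear')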
@triton.jit
def triton_poi_fused_cat_reflection_pad2d_11(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 1156 % 64
x0 = xindex % 34
x1 = xindex // 34 % 34
x3 = xindex // 73984
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x2 +
32768 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x2 +
32768 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr2 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x1))), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp15 = tl.load(in_ptr3 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * (-32 +
x2) + 32768 * x3), tmp12 & xmask, eviction_policy='evict_last',
other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x5, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_12(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x2 = xindex // 1024 % 32
x3 = xindex // 32768
x5 = xindex % 1024
x1 = xindex // 32 % 32
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = x2
tl.full([1], 0, tl.int64)
tmp6 = tl.full([1], 32, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tl.load(in_ptr1 + (x5 + 1024 * x2 + 32768 * x3), tmp7, other=0.0)
tmp9 = tl.load(in_ptr2 + (x5 + 1024 * x2 + 32768 * x3), tmp7, other=0.0)
tmp10 = tl.load(in_ptr3 + x1, tmp7, eviction_policy='evict_last', other=0.0
)
tmp11 = tmp9 * tmp10
tmp12 = tmp8 + tmp11
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp7, tmp12, tmp13)
tmp15 = tmp3 >= tmp6
tl.full([1], 64, tl.int64)
tmp18 = tl.load(in_ptr4 + (x5 + 1024 * (-32 + x2) + 32768 * x3), tmp15,
other=0.0)
tmp19 = tl.where(tmp7, tmp14, tmp18)
tmp20 = tmp2 + tmp19
tmp21 = 0.0
tmp22 = tmp20 > tmp21
tmp23 = 0.01
tmp24 = tmp20 * tmp23
tmp25 = tl.where(tmp22, tmp20, tmp24)
tmp26 = tmp25 > tmp21
tl.store(in_out_ptr0 + x4, tmp20, None)
tl.store(out_ptr0 + x4, tmp26, None)
@triton.jit
def triton_poi_fused_arange_13(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_14(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_15(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 31, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_clamp_mul_sub_16(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_leaky_relu_mul_sub_17(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x2 = xindex // 4096
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x2), None,
eviction_policy='evict_last')
tmp10 = 0.0
tmp11 = tmp9 > tmp10
tmp12 = 0.01
tmp13 = tmp9 * tmp12
tmp14 = tl.where(tmp11, tmp9, tmp13)
tmp16 = tmp15 + tmp1
tmp17 = tmp15 < 0
tmp18 = tl.where(tmp17, tmp16, tmp15)
tmp19 = tl.load(in_ptr2 + (tmp18 + 32 * tmp4 + 1024 * x2), None,
eviction_policy='evict_last')
tmp20 = tmp19 > tmp10
tmp21 = tmp19 * tmp12
tmp22 = tl.where(tmp20, tmp19, tmp21)
tmp23 = tmp22 - tmp14
tmp25 = tmp23 * tmp24
tmp26 = tmp14 + tmp25
tl.store(out_ptr0 + x4, tmp26, None)
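# Illustrative eager equivalent of the x_27 -> x_28 step that kernels 17
# and 18 implement between them (kernel 17 does the horizontal lerp,
# kernel 18 the vertical lerp plus the concatenation with the skip tensor):
def _leaky_relu_upsample_reference(x):
    return F.interpolate(F.leaky_relu(x, negative_slope=0.01),
                         scale_factor=2, mode='bilinear')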
@triton.jit
def triton_poi_fused_cat_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 4096 % 48
x3 = xindex // 196608
x4 = xindex % 4096
x1 = xindex // 64 % 64
x0 = xindex % 64
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + 4096 * x2 + 131072 * x3), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([XBLOCK], 32, tl.int32)
tmp8 = tmp6 + tmp7
tmp9 = tmp6 < 0
tmp10 = tl.where(tmp9, tmp8, tmp6)
tmp11 = tl.load(in_ptr2 + x0, tmp4, eviction_policy='evict_last', other=0.0
)
tmp12 = tmp11 + tmp7
tmp13 = tmp11 < 0
tmp14 = tl.where(tmp13, tmp12, tmp11)
tmp15 = tl.load(in_ptr3 + (tmp14 + 32 * tmp10 + 1024 * x2 + 32768 * x3),
tmp4, eviction_policy='evict_last', other=0.0)
tmp16 = 0.0
tmp17 = tmp15 > tmp16
tmp18 = 0.01
tmp19 = tmp15 * tmp18
tmp20 = tl.where(tmp17, tmp15, tmp19)
tmp21 = tl.load(in_ptr4 + x0, tmp4, eviction_policy='evict_last', other=0.0
)
tmp22 = tmp21 + tmp7
tmp23 = tmp21 < 0
tmp24 = tl.where(tmp23, tmp22, tmp21)
tmp25 = tl.load(in_ptr3 + (tmp24 + 32 * tmp10 + 1024 * x2 + 32768 * x3),
tmp4, eviction_policy='evict_last', other=0.0)
tmp26 = tmp25 > tmp16
tmp27 = tmp25 * tmp18
tmp28 = tl.where(tmp26, tmp25, tmp27)
tmp29 = tmp28 - tmp20
tmp30 = tl.load(in_ptr5 + x0, tmp4, eviction_policy='evict_last', other=0.0
)
tmp31 = tmp29 * tmp30
tmp32 = tmp20 + tmp31
tmp33 = tmp32 - tmp5
tmp34 = tl.load(in_ptr6 + x1, tmp4, eviction_policy='evict_last', other=0.0
)
tmp35 = tmp33 * tmp34
tmp36 = tmp5 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp4, tmp36, tmp37)
tmp39 = tmp0 >= tmp3
tl.full([1], 48, tl.int64)
tmp42 = tl.load(in_ptr7 + (x4 + 4096 * (-32 + x2) + 65536 * x3), tmp39,
other=0.0)
tmp43 = tl.where(tmp4, tmp38, tmp42)
tl.store(out_ptr0 + x5, tmp43, None)
@triton.jit
def triton_poi_fused_reflection_pad2d_19(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 836352
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 66
x1 = xindex // 66 % 66
x2 = xindex // 4356
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 +
x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_20(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
x2 = xindex // 65536
x4 = xindex % 65536
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x4 + 196608 * x2), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(in_out_ptr0 + x3, tmp9, None)
@triton.jit
def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
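# Note: kernel 21 is a plain in-place bias add for the final 1x1, 1-channel
# conv; in eager mode it is simply `out += bias.view(1, -1, 1, 1)`.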
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_22(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_23(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22, primals_23
) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (32,), (1,))
assert_size_stride(primals_14, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (32,), (1,))
assert_size_stride(primals_16, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_17, (32,), (1,))
assert_size_stride(primals_18, (16, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_19, (16,), (1,))
assert_size_stride(primals_20, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_21, (16,), (1,))
assert_size_stride(primals_22, (1, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_23, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 66, 66), (4356, 4356, 66, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(17424)](primals_1, buf0,
17424, XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf2 = empty_strided_cuda((4, 16, 66, 66), (69696, 4356, 66, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1[grid(278784)
](buf1, primals_3, buf2, 278784, XBLOCK=1024, num_warps=4,
num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf4 = buf3
del buf3
triton_poi_fused_add_convolution_leaky_relu_2[grid(262144)](buf4,
primals_5, primals_1, 262144, XBLOCK=512, num_warps=8, num_stages=1
)
del primals_1
del primals_5
buf5 = empty_strided_cuda((4, 16, 34, 34), (18496, 1156, 34, 1),
torch.float32)
triton_poi_fused_avg_pool2d_reflection_pad2d_3[grid(73984)](buf4,
buf5, 73984, XBLOCK=1024, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf7 = empty_strided_cuda((4, 32, 34, 34), (36992, 1156, 34, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_4[grid(147968)
](buf6, primals_7, buf7, 147968, XBLOCK=512, num_warps=8,
num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf9 = buf8
del buf8
buf10 = buf9
del buf9
triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_5[grid(131072)](
buf10, primals_9, buf4, 131072, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_9
buf11 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused_arange_6[grid(32)](buf11, 32, XBLOCK=32, num_warps
=1, num_stages=1)
buf12 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_7[grid(32)](buf12, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_8[grid(32)](buf13, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_7[grid(32)](buf14, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused_add_clamp_8[grid(32)](buf15, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((32,), (1,), torch.float32)
triton_poi_fused__to_copy_add_clamp_mul_sub_9[grid(32)](buf18, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
buf16 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
buf19 = buf16
del buf16
buf21 = buf17
del buf17
triton_poi_fused__unsafe_index_add_avg_pool2d_mul_sub_10[grid(131072)](
buf19, buf21, buf13, buf14, buf10, buf12, buf15, buf18, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf20 = empty_strided_cuda((32, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_clamp_mul_sub_9[grid(32)](buf20, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf22 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused_cat_reflection_pad2d_11[grid(295936)](buf19, buf21,
buf20, buf10, buf22, 295936, XBLOCK=1024, num_warps=4, num_stages=1
)
buf23 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf24 = empty_strided_cuda((4, 32, 34, 34), (36992, 1156, 34, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_4[grid(147968)
](buf23, primals_15, buf24, 147968, XBLOCK=512, num_warps=8,
num_stages=1)
buf25 = extern_kernels.convolution(buf24, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf26 = buf25
del buf25
buf44 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.bool)
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_12[grid
(131072)](buf26, primals_17, buf19, buf21, buf20, buf10, buf44,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del buf19
del buf21
del primals_17
buf27 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_arange_13[grid(64)](buf27, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_14[grid(64)](buf28, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_15[grid(64)](buf29, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_14[grid(64)](buf30, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf31 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_15[grid(64)](buf31, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf32 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_clamp_mul_sub_16[grid(64)](buf32, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf33 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
triton_poi_fused__unsafe_index_add_leaky_relu_mul_sub_17[grid(524288)](
buf28, buf30, buf26, buf31, buf32, buf33, 524288, XBLOCK=1024,
num_warps=4, num_stages=1)
buf34 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_clamp_mul_sub_16[grid(64)](buf34, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf35 = empty_strided_cuda((4, 48, 64, 64), (196608, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_18[grid(786432)](buf33, buf29, buf30, buf26,
buf31, buf32, buf34, buf4, buf35, 786432, XBLOCK=512, num_warps
=8, num_stages=1)
del buf26
del buf33
buf36 = empty_strided_cuda((4, 48, 66, 66), (209088, 4356, 66, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_19[grid(836352)](buf35, buf36,
836352, XBLOCK=1024, num_warps=4, num_stages=1)
buf37 = extern_kernels.convolution(buf36, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf38 = empty_strided_cuda((4, 16, 66, 66), (69696, 4356, 66, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1[grid(278784)
](buf37, primals_19, buf38, 278784, XBLOCK=1024, num_warps=4,
num_stages=1)
buf39 = extern_kernels.convolution(buf38, primals_20, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf40 = buf39
del buf39
triton_poi_fused_add_convolution_leaky_relu_20[grid(262144)](buf40,
primals_21, buf35, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del buf35
del primals_21
buf41 = extern_kernels.convolution(buf40, primals_22, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf42 = buf41
del buf41
triton_poi_fused_convolution_21[grid(16384)](buf42, primals_23,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf43 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_22[grid
(262144)](buf37, primals_19, buf43, 262144, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf37
del primals_19
buf45 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_23[grid
(131072)](buf23, primals_15, buf45, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
del buf23
del primals_15
buf46 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_23[grid
(131072)](buf6, primals_7, buf46, 131072, XBLOCK=512, num_warps
=8, num_stages=1)
del buf6
del primals_7
buf47 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_22[grid
(262144)](buf1, primals_3, buf47, 262144, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf1
del primals_3
return (buf42, primals_2, primals_4, primals_6, primals_8, primals_14,
primals_16, primals_18, primals_20, primals_22, buf0, buf2, buf4,
buf5, buf7, buf10, buf11, buf12, buf13, buf14, buf15, buf18, buf20,
buf22, buf24, buf27, buf28, buf29, buf30, buf31, buf32, buf34,
buf36, buf38, buf40, buf43, buf44, buf45, buf46, buf47)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, dropout=False, norm=None,
residual=True, activation='leakyrelu', in_place_activation=True,
transpose=False, reflectpad=True):
super(ConvBlock, self).__init__()
self.dropout = dropout
self.residual = residual
self.activation = activation
self.transpose = transpose
self.reflectpad = reflectpad
if self.dropout:
self.dropout1 = nn.Dropout2d(p=0.05)
self.dropout2 = nn.Dropout2d(p=0.05)
self.norm1 = None
self.norm2 = None
if norm is not None:
if norm == 'batch':
self.norm1 = nn.BatchNorm2d(out_channels)
self.norm2 = nn.BatchNorm2d(out_channels)
elif norm == 'instance':
self.norm1 = nn.InstanceNorm2d(out_channels, affine=True)
self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
if self.transpose:
self.conv1 = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size=3, padding=0 if self.reflectpad else 1)
self.conv2 = nn.ConvTranspose2d(out_channels, out_channels,
kernel_size=3, padding=0 if self.reflectpad else 1)
else:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
padding=0 if self.reflectpad else 1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=
3, padding=0 if self.reflectpad else 1)
if self.activation == 'relu':
self.actfun1 = nn.ReLU(inplace=in_place_activation)
self.actfun2 = nn.ReLU(inplace=in_place_activation)
elif self.activation == 'leakyrelu':
self.actfun1 = nn.LeakyReLU(inplace=in_place_activation)
self.actfun2 = nn.LeakyReLU(inplace=in_place_activation)
elif self.activation == 'elu':
self.actfun1 = nn.ELU(inplace=in_place_activation)
self.actfun2 = nn.ELU(inplace=in_place_activation)
elif self.activation == 'selu':
self.actfun1 = nn.SELU(inplace=in_place_activation)
self.actfun2 = nn.SELU(inplace=in_place_activation)
if self.reflectpad:
self.rpad1 = nn.ReflectionPad2d(1)
self.rpad2 = nn.ReflectionPad2d(1)
def forward(self, x):
ox = x
if self.reflectpad:
x = self.rpad1(x)
x = self.conv1(x)
if self.dropout:
x = self.dropout1(x)
x = self.actfun1(x)
if self.norm1:
x = self.norm1(x)
if self.reflectpad:
x = self.rpad2(x)
x = self.conv2(x)
if self.dropout:
x = self.dropout2(x)
if self.residual:
            nc = min(ox.shape[1], x.shape[1])
            x[:, :nc, :, :] += ox[:, :nc, :, :]
x = self.actfun2(x)
if self.norm2:
x = self.norm2(x)
return x
class BabyUnetNew(nn.Module):
def __init__(self, n_channel_in=1, n_channel_out=1):
super(BabyUnetNew, self).__init__()
self.pool1 = nn.AvgPool2d(kernel_size=2)
self.pool2 = nn.AvgPool2d(kernel_size=2)
self.up1 = lambda x: F.interpolate(x, mode='bilinear', scale_factor=2)
self.up2 = lambda x: F.interpolate(x, mode='bilinear', scale_factor=2)
self.conv1 = ConvBlock(n_channel_in, 16)
self.conv2 = ConvBlock(16, 32)
self.conv3 = ConvBlock(32, 32)
self.conv4 = ConvBlock(64, 32)
self.conv5 = ConvBlock(48, 16)
self.conv6 = nn.Conv2d(16, n_channel_out, 1)
def forward(self, input_0):
primals_2 = self.conv1.conv1.weight
primals_3 = self.conv1.conv1.bias
primals_4 = self.conv1.conv2.weight
primals_5 = self.conv1.conv2.bias
primals_6 = self.conv2.conv1.weight
primals_7 = self.conv2.conv1.bias
primals_8 = self.conv2.conv2.weight
primals_9 = self.conv2.conv2.bias
primals_10 = self.conv3.conv1.weight
primals_11 = self.conv3.conv1.bias
primals_12 = self.conv3.conv2.weight
primals_13 = self.conv3.conv2.bias
primals_14 = self.conv4.conv1.weight
primals_15 = self.conv4.conv1.bias
primals_16 = self.conv4.conv2.weight
primals_17 = self.conv4.conv2.bias
primals_18 = self.conv5.conv1.weight
primals_19 = self.conv5.conv1.bias
primals_20 = self.conv5.conv2.weight
primals_21 = self.conv5.conv2.bias
primals_22 = self.conv6.weight
primals_23 = self.conv6.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23])
return output[0]
| royerloic/aydin | BabyUnet | false | 16,393 | [
"BSD-3-Clause"
] | 78 | f9c61a24030891d008c318b250da5faec69fcd7d | https://github.com/royerloic/aydin/tree/f9c61a24030891d008c318b250da5faec69fcd7d |
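# Sketch (not part of the dataset record above): BabyUnet's original forward
# pass is not shown in this excerpt, but the channel arithmetic (conv4 takes
# 64 = 32 + 32, conv5 takes 48 = 32 + 16) implies a two-level U-Net with
# concatenated skips. A plausible reconstruction, assuming the ConvBlock and
# BabyUnetNew classes above are in scope and the input size is divisible by 4:
import torch
def baby_unet_forward(net, x):
    c1 = net.conv1(x)                  # 16 channels, full resolution
    c2 = net.conv2(net.pool1(c1))      # 32 channels, 1/2 resolution
    c3 = net.conv3(net.pool2(c2))      # 32 channels, 1/4 resolution
    d1 = net.conv4(torch.cat([net.up1(c3), c2], dim=1))  # 32 + 32 -> 32
    d2 = net.conv5(torch.cat([net.up2(d1), c1], dim=1))  # 32 + 16 -> 16
    return net.conv6(d2)               # 1x1 projection to n_channel_out
net = BabyUnetNew(n_channel_in=1, n_channel_out=1)
y = baby_unet_forward(net, torch.rand(1, 1, 64, 64))    # uses eager submodules
assert y.shape == (1, 1, 64, 64)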
CausualConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/h5/ch52rh7o72g4gghg7x35d7a6gpapvkodjbo4j7nhkutmfi7ktvp5.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [2], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 8) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 8), (32, 8, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 128, grid=grid(128), stream=stream0)
del primals_2
return (reinterpret_tensor(buf1, (4, 4, 6), (32, 8, 1), 0), primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class CausualConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=1, dilation=1, bias=True, w_init_gain='linear', param=None):
super(CausualConv, self).__init__()
        if padding is None:
            assert kernel_size % 2 == 1
            # assign to self.padding, which forward() and the Conv1d call rely on
            self.padding = int(dilation * (kernel_size - 1) / 2) * 2
        else:
            self.padding = padding * 2
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=self.padding, dilation=
dilation, bias=bias)
torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init.
calculate_gain(w_init_gain, param=param))
def forward(self, x):
x = self.conv(x)
x = x[:, :, :-self.padding]
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 8 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(2,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 8), (32, 8, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(128)](buf1, primals_2, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 4, 6), (32, 8, 1), 0
), primals_1, primals_3
class CausualConvNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=1, dilation=1, bias=True, w_init_gain='linear', param=None):
super(CausualConvNew, self).__init__()
        if padding is None:
            assert kernel_size % 2 == 1
            # assign to self.padding, which forward() and the Conv1d call rely on
            self.padding = int(dilation * (kernel_size - 1) / 2) * 2
        else:
            self.padding = padding * 2
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=self.padding, dilation=
dilation, bias=bias)
torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init.
calculate_gain(w_init_gain, param=param))
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| shaun95/StarGANv2-VC | CausualConv | false | 16,394 | [
"MIT"
] | 116 | ed20538971a03d699351a349a3631767333baeb7 | https://github.com/shaun95/StarGANv2-VC/tree/ed20538971a03d699351a349a3631767333baeb7 |
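# Shape sketch (illustrative, not part of the record above) for CausualConv:
# with padding=1 the Conv1d pads self.padding = 2 on each side, and forward()
# trims the trailing self.padding samples, so L_out = L + 2*2 - (k - 1) - 2.
import torch
m = CausualConv(4, 4)                 # kernel_size=1, so L_out = 4 + 4 - 0 - 2
x = torch.rand(4, 4, 4)
assert m(x).shape == (4, 4, 6)        # matches the compiled kernel's (4, 4, 6)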
PatchEmbedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/sr/csrhhqsexdcor6gq6tz4dawxblhadgekinzxxkt33uwojltligp6.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
return (reinterpret_tensor(buf1, (4, 1, 4), (4, 1, 1), 0), primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PatchEmbedding(nn.Module):
def __init__(self, image_size, patch_size, embed_dim, channels):
super().__init__()
self.image_size = image_size
if image_size[0] % patch_size != 0 or image_size[1] % patch_size != 0:
raise ValueError(
'image dimensions must be divisible by the patch size')
self.grid_size = image_size[0] // patch_size, image_size[1
] // patch_size
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.patch_size = patch_size
self.proj = nn.Conv2d(channels, embed_dim, kernel_size=patch_size,
stride=patch_size)
def forward(self, im):
_B, _C, _H, _W = im.shape
x = self.proj(im).flatten(2).transpose(1, 2)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'image_size': [4, 4], 'patch_size': 4, 'embed_dim': 4,
'channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(4,
4), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf1, (4, 1, 4), (4, 1, 1), 0
), primals_1, primals_2
class PatchEmbeddingNew(nn.Module):
def __init__(self, image_size, patch_size, embed_dim, channels):
super().__init__()
self.image_size = image_size
if image_size[0] % patch_size != 0 or image_size[1] % patch_size != 0:
raise ValueError(
'image dimensions must be divisible by the patch size')
self.grid_size = image_size[0] // patch_size, image_size[1
] // patch_size
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.patch_size = patch_size
self.proj = nn.Conv2d(channels, embed_dim, kernel_size=patch_size,
stride=patch_size)
def forward(self, input_0):
primals_1 = self.proj.weight
primals_3 = self.proj.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| shampooma/segmenter | PatchEmbedding | false | 16,395 | [
"MIT"
] | 418 | b08fd481da6758e37d108ba28676229b62f757aa | https://github.com/shampooma/segmenter/tree/b08fd481da6758e37d108ba28676229b62f757aa |
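# Equivalence sketch (illustrative, not part of the record above): the strided
# Conv2d in PatchEmbedding is a shared linear projection applied to each
# non-overlapping patch. Tensor sizes here are assumptions for a quick check.
import torch
pe = PatchEmbedding(image_size=[8, 8], patch_size=4, embed_dim=16, channels=3)
im = torch.rand(2, 3, 8, 8)
tokens = pe(im)                                       # (B, num_patches, D)
patches = im.unfold(2, 4, 4).unfold(3, 4, 4)          # (B, C, 2, 2, 4, 4)
ref = torch.einsum('bchwpq,dcpq->bhwd', patches, pe.proj.weight) + pe.proj.bias
# loose tolerance: conv and einsum accumulate the 48 products in different orders
torch.testing.assert_close(tokens, ref.reshape(2, -1, 16), rtol=1e-4, atol=1e-6)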
PositionWiseFCNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [sequences], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# sequences => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [sequences], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# sequences => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gm/cgmflgdlpeeb52xctoa47uvw47ycyf7ahlj5wdscxdatpbwcboco.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5q/c5qmnkuxezgezseizmolw3mx24fyy6xp3cfoz3egpqwcprxgwjre.py
# Topologically Sorted Source Nodes: [sequences_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# sequences_3 => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [sequences], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sequences], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_2
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf7, 256, grid=grid(256), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [sequences_3], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf6, primals_7, primals_1, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf6, primals_1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_6, buf7, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.optim
import torch.utils.data
class PositionWiseFCNetwork(nn.Module):
"""
The Position-Wise Feed Forward Network sublayer.
"""
def __init__(self, d_model, d_inner, dropout):
"""
:param d_model: size of vectors throughout the transformer model, i.e. input and output sizes for this sublayer
:param d_inner: an intermediate size
:param dropout: dropout probability
"""
super(PositionWiseFCNetwork, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.layer_norm = nn.LayerNorm(d_model)
self.fc1 = nn.Linear(d_model, d_inner)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(d_inner, d_model)
self.apply_dropout = nn.Dropout(dropout)
def forward(self, sequences):
"""
Forward prop.
:param sequences: input sequences, a tensor of size (N, pad_length, d_model)
:return: transformed output sequences, a tensor of size (N, pad_length, d_model)
"""
input_to_add = sequences.clone()
sequences = self.layer_norm(sequences)
sequences = self.apply_dropout(self.relu(self.fc1(sequences)))
sequences = self.fc2(sequences)
sequences = self.apply_dropout(sequences) + input_to_add
return sequences
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_inner': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(256)](buf4,
primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf6, primals_1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), reinterpret_tensor(buf4, (64, 4), (4, 1), 0
), primals_6, buf7, primals_4
class PositionWiseFCNetworkNew(nn.Module):
"""
The Position-Wise Feed Forward Network sublayer.
"""
def __init__(self, d_model, d_inner, dropout):
"""
:param d_model: size of vectors throughout the transformer model, i.e. input and output sizes for this sublayer
:param d_inner: an intermediate size
:param dropout: dropout probability
"""
super(PositionWiseFCNetworkNew, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.layer_norm = nn.LayerNorm(d_model)
self.fc1 = nn.Linear(d_model, d_inner)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(d_inner, d_model)
self.apply_dropout = nn.Dropout(dropout)
def forward(self, input_0):
primals_2 = self.layer_norm.weight
primals_3 = self.layer_norm.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| sgrvinod/a-PyTorch-Tutorial-to-Machine-Translation | PositionWiseFCNetwork | false | 16,396 | [
"MIT"
] | 59 | a4dd7bc5554d11ac80355241f603dcaa24bc70ae | https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Machine-Translation/tree/a4dd7bc5554d11ac80355241f603dcaa24bc70ae |
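# Reference sketch (illustrative, not part of the record above; assumes the
# eager PositionWiseFCNetwork class from the record is in scope): in eval mode
# both dropouts are identities, so the sublayer is a plain pre-LN residual FFN.
import torch
ffn = PositionWiseFCNetwork(d_model=8, d_inner=32, dropout=0.1).eval()
x = torch.rand(2, 5, 8)
with torch.no_grad():
    ref = x + ffn.fc2(torch.relu(ffn.fc1(ffn.layer_norm(x))))
    torch.testing.assert_close(ffn(x), ref)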
Model | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/uu/cuupb7yo7ai64qlwi4lpnlvdf3gw6jlp543potvhemy2bbjwrt5a.py
# Topologically Sorted Source Nodes: [mul, mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1073741824],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1073741824
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16777216
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16777216, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 16777216), (268435456, 67108864, 16777216, 1))
assert_size_stride(primals_3, (16777216, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 16777216), (268435456, 67108864, 16777216, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, primals_1, primals_3, buf0, 1073741824, grid=grid(1073741824), stream=stream0)
return (buf0, primals_1, primals_2, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16777216, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 16777216), (268435456, 67108864, 16777216, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16777216, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import torch.nn.functional
from torch.nn.parameter import Parameter
from torch.nn.modules import Module
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
import torch.optim
from torch.nn import Parameter
from torch.nn import Module
class Model(Module):
def __init__(self):
super(Model, self).__init__()
self.a = Parameter(torch.FloatTensor(4096 * 4096).fill_(1.0))
self.b = Parameter(torch.FloatTensor(4096 * 4096).fill_(2.0))
def forward(self, input):
return input * self.a * self.b
def get_inputs():
return [torch.rand([4, 4, 4, 16777216])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.nn.functional
from torch.nn.parameter import Parameter
from torch.nn.modules import Module
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
import torch.optim
from torch.nn import Parameter
from torch.nn import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16777216
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x2, tmp4, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16777216,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 16777216), (268435456, 67108864,
16777216, 1))
assert_size_stride(primals_3, (16777216,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 16777216), (268435456, 67108864,
16777216, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(1073741824)](primals_2, primals_1,
primals_3, buf0, 1073741824, XBLOCK=1024, num_warps=4, num_stages=1
)
return buf0, primals_1, primals_2, primals_3
class ModelNew(Module):
def __init__(self):
super(ModelNew, self).__init__()
self.a = Parameter(torch.FloatTensor(4096 * 4096).fill_(1.0))
self.b = Parameter(torch.FloatTensor(4096 * 4096).fill_(2.0))
def forward(self, input_0):
primals_1 = self.a
primals_3 = self.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Cubbee/apex | Model | false | 16,397 | [
"BSD-3-Clause"
] | 268 | 0a991543846966d5f586540dc2441e512139e9fc | https://github.com/Cubbee/apex/tree/0a991543846966d5f586540dc2441e512139e9fc |
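# Broadcasting sketch (illustrative, not part of the record above): the fused
# Triton kernel reads x once and applies both elementwise scalings in a single
# memory pass. The arithmetic is ordinary last-dimension broadcasting, shrunk
# here from 4096*4096 parameter elements to 8 so it runs anywhere:
import torch
a = torch.full((8,), 1.0)
b = torch.full((8,), 2.0)
x = torch.rand(2, 3, 8)
torch.testing.assert_close(x * a * b, x * (a * b))   # same result, fewer passes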
ChainCRF | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/uf/cufgro6ei5psr3fmvzjefledxxmwhzbvakxsxv3fntxzh2x3djaj.py
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
# output => add
# output_1 => mul
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %unsqueeze), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %unsqueeze_2), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 25
x0 = xindex % 5
x2 = (xindex // 25)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + (5*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp8 = tmp6 * tmp7
tl.store(in_out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (5, 4), (4, 1))
assert_size_stride(primals_3, (5, ), (1, ))
assert_size_stride(primals_4, (25, 4), (4, 1))
assert_size_stride(primals_5, (25, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 5), (5, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 5), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 25), (25, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 25), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf1, (4, 4, 5, 5), (100, 25, 5, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(buf2, primals_5, buf0, primals_3, primals_6, 400, grid=grid(400), stream=stream0)
del buf0
del primals_3
del primals_5
return (buf2, primals_6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((5, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((25, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((25, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
def logsumexp(x, dim=None):
"""
Args:
x: A pytorch tensor (any dimension will do)
dim: int or None; the dimension over which to sum. `None` (the
default) sums over all axes.
Returns: The result of the log(sum(exp(...))) operation.
"""
if dim is None:
xmax = x.max()
xmax_ = x.max()
return xmax_ + torch.log(torch.exp(x - xmax).sum())
else:
xmax, _ = x.max(dim, keepdim=True)
xmax_, _ = x.max(dim)
return xmax_ + torch.log(torch.exp(x - xmax).sum(dim))
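# The max-shift above is the standard numerically stable form of log-sum-exp:
# logsumexp(x) = max(x) + log(sum(exp(x - max(x)))). A minimal sanity check
# (a hypothetical helper, not part of the original module) against
# torch.logsumexp:
def _logsumexp_sanity_check():
    x = torch.randn(3, 5) * 50.0  # large values would overflow a naive exp-sum
    assert torch.allclose(logsumexp(x, dim=1), torch.logsumexp(x, dim=1),
        atol=1e-05)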
class ChainCRF(nn.Module):
def __init__(self, input_size, num_labels, bigram=True):
"""
Args:
input_size: int
the dimension of the input.
num_labels: int
the number of labels of the CRF layer
bigram: bool
whether to apply the bi-gram parameter.
"""
super(ChainCRF, self).__init__()
self.input_size = input_size
self.num_labels = num_labels + 1
self.pad_label_id = num_labels
self.bigram = bigram
self.state_nn = nn.Linear(input_size, self.num_labels)
if bigram:
self.trans_nn = nn.Linear(input_size, self.num_labels * self.
num_labels)
self.register_parameter('trans_matrix', None)
else:
self.trans_nn = None
self.trans_matrix = Parameter(torch.Tensor(self.num_labels,
self.num_labels))
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.state_nn.bias, 0.0)
if self.bigram:
nn.init.xavier_uniform_(self.trans_nn.weight)
nn.init.constant_(self.trans_nn.bias, 0.0)
else:
nn.init.normal_(self.trans_matrix)
def forward(self, input, mask):
"""
Args:
input: Tensor
the input tensor with shape = [batch, length, input_size]
mask: Tensor
the mask tensor with shape = [batch, length]
Returns: Tensor
the energy tensor with shape = [batch, length, num_label, num_label]
"""
batch, length, _ = input.size()
out_s = self.state_nn(input).unsqueeze(2)
if self.bigram:
out_t = self.trans_nn(input).view(batch, length, self.
num_labels, self.num_labels)
output = out_t + out_s
else:
output = self.trans_matrix + out_s
output = output * mask.unsqueeze(2).unsqueeze(3)
return output
def loss(self, energy, target, mask):
"""
Args:
energy: Tensor
the energy tensor with shape = [batch, length, num_label, num_label]
target: Tensor
the tensor of target labels with shape [batch, length]
mask: Tensor
the mask tensor with shape = [batch, length]
Returns: Tensor
A scalar tensor holding the mean negative log-likelihood loss
"""
batch, length = target.size()
energy_transpose = energy.transpose(0, 1)
target_transpose = target.transpose(0, 1)
mask_transpose = mask.unsqueeze(2).transpose(0, 1)
partition = None
batch_index = torch.arange(0, batch, dtype=torch.long, device=
target.device)
prev_label = torch.zeros(batch, dtype=torch.long, device=target.device)
prev_label = prev_label.fill_(self.num_labels - 1)
tgt_energy = torch.zeros(batch, device=target.device)
for t in range(length):
curr_energy = energy_transpose[t]
mask_t = mask_transpose[t]
if t == 0:
partition = curr_energy[:, -1, :]
else:
partition_new = logsumexp(curr_energy + partition.unsqueeze
(2), dim=1)
partition = partition + (partition_new - partition) * mask_t
tgt_energy += curr_energy[batch_index, prev_label,
target_transpose[t].data]
prev_label_new = target_transpose[t].data
prev_label = prev_label + (prev_label_new - prev_label
) * mask_t.squeeze(1).long()
return (logsumexp(partition, dim=1) - tgt_energy).mean()
def decode(self, energy, mask):
"""
Args:
energy: Tensor
the energy tensor with shape = [batch, length, num_label, num_label]
mask: Tensor
the mask tensor with shape = [batch, length]
Returns: Tensor
decoding results in shape [batch, length]
"""
energy_transpose = energy.transpose(0, 1)
mask_transpose = mask.unsqueeze(2).transpose(0, 1).long()
energy_transpose = energy_transpose[:, :, :-1, :-1]
length, batch_size, num_label, _ = energy_transpose.size()
batch_index = torch.arange(0, batch_size, dtype=torch.long, device=
energy.device)
pi = torch.zeros([length, batch_size, num_label, 1], device=energy.
device)
pointer = torch.zeros([length, batch_size, num_label], dtype=torch.
long, device=energy.device)
dummy_pointer = torch.arange(self.num_labels - 1, device=energy.device)
back_pointer = torch.zeros([length, batch_size], dtype=torch.long,
device=energy.device)
pi[0] = energy[:, 0, -1, :-1].unsqueeze(2)
pointer[0] = -1
for t in range(1, length):
pi_prev = pi[t - 1]
mask_t = mask_transpose[t]
pi_t, pointer_t = torch.max(energy_transpose[t] + pi_prev, dim=1)
pointer[t] = pointer_t * mask_t + dummy_pointer * (1 - mask_t)
pi[t] = (pi_t * mask_t).unsqueeze(2) + pi[t - 1] * (1 - mask_t.
unsqueeze(2))
_, back_pointer[-1] = torch.max(pi[-1].squeeze(2), dim=1)
for t in reversed(range(length - 1)):
pointer_last = pointer[t + 1]
back_pointer[t] = pointer_last[batch_index, back_pointer[t + 1]]
return back_pointer.transpose(0, 1)
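# A minimal usage sketch (hypothetical sizes and names, not part of the
# original module): forward() builds the energy tensor, loss() gives the
# training objective, and decode() runs Viterbi over the same energies.
def _chain_crf_example():
    batch, length, input_size, num_labels = 2, 6, 8, 4
    crf = ChainCRF(input_size, num_labels, bigram=True)
    x = torch.randn(batch, length, input_size)
    mask = torch.ones(batch, length)
    energy = crf(x, mask)  # [batch, length, num_labels + 1, num_labels + 1]
    nll = crf.loss(energy, torch.randint(0, num_labels, (batch, length)), mask)
    paths = crf.decode(energy, mask)  # [batch, length] label indices
    return nll, paths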
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'num_labels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 25
x0 = xindex % 5
x2 = xindex // 25
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + 5 * x2), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp8 = tmp6 * tmp7
tl.store(in_out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (5, 4), (4, 1))
assert_size_stride(primals_3, (5,), (1,))
assert_size_stride(primals_4, (25, 4), (4, 1))
assert_size_stride(primals_5, (25,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 5), (5, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 5), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 25), (25, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 25), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf1, (4, 4, 5, 5), (100, 25, 5, 1), 0)
del buf1
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(400)](buf2, primals_5, buf0,
primals_3, primals_6, 400, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_3
del primals_5
return buf2, primals_6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0)
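# A hedged eager-mode reference for what call() returns above, inferred from
# the fused add/mul kernel: the transition scores reshaped to
# [batch, length, num_labels, num_labels], plus the state scores broadcast
# over dim 2, scaled by the mask (a sketch, not generated code):
def _reference_energy(x, state_w, state_b, trans_w, trans_b, mask,
    num_labels=5):
    out_s = (x @ state_w.t() + state_b).unsqueeze(2)
    out_t = (x @ trans_w.t() + trans_b).view(x.size(0), x.size(1),
        num_labels, num_labels)
    return (out_t + out_s) * mask.unsqueeze(2).unsqueeze(3)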
def logsumexp(x, dim=None):
"""
Args:
x: A pytorch tensor (any dimension will do)
dim: int or None; the dimension over which to sum. `None` (the
default) sums over all axes.
Returns: The result of the log(sum(exp(...))) operation.
"""
if dim is None:
xmax = x.max()
xmax_ = x.max()
return xmax_ + torch.log(torch.exp(x - xmax).sum())
else:
xmax, _ = x.max(dim, keepdim=True)
xmax_, _ = x.max(dim)
return xmax_ + torch.log(torch.exp(x - xmax).sum(dim))
class ChainCRFNew(nn.Module):
def __init__(self, input_size, num_labels, bigram=True):
"""
Args:
input_size: int
the dimension of the input.
num_labels: int
the number of labels of the CRF layer
bigram: bool
whether to apply the bi-gram parameter.
"""
super(ChainCRFNew, self).__init__()
self.input_size = input_size
self.num_labels = num_labels + 1
self.pad_label_id = num_labels
self.bigram = bigram
self.state_nn = nn.Linear(input_size, self.num_labels)
if bigram:
self.trans_nn = nn.Linear(input_size, self.num_labels * self.
num_labels)
self.register_parameter('trans_matrix', None)
else:
self.trans_nn = None
self.trans_matrix = Parameter(torch.Tensor(self.num_labels,
self.num_labels))
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.state_nn.bias, 0.0)
if self.bigram:
nn.init.xavier_uniform_(self.trans_nn.weight)
nn.init.constant_(self.trans_nn.bias, 0.0)
else:
nn.init.normal_(self.trans_matrix)
def loss(self, energy, target, mask):
"""
Args:
energy: Tensor
the energy tensor with shape = [batch, length, num_label, num_label]
target: Tensor
the tensor of target labels with shape [batch, length]
mask: Tensor
the mask tensor with shape = [batch, length]
Returns: Tensor
A scalar tensor holding the mean negative log-likelihood loss
"""
batch, length = target.size()
energy_transpose = energy.transpose(0, 1)
target_transpose = target.transpose(0, 1)
mask_transpose = mask.unsqueeze(2).transpose(0, 1)
partition = None
batch_index = torch.arange(0, batch, dtype=torch.long, device=
target.device)
prev_label = torch.zeros(batch, dtype=torch.long, device=target.device)
prev_label = prev_label.fill_(self.num_labels - 1)
tgt_energy = torch.zeros(batch, device=target.device)
for t in range(length):
curr_energy = energy_transpose[t]
mask_t = mask_transpose[t]
if t == 0:
partition = curr_energy[:, -1, :]
else:
partition_new = logsumexp(curr_energy + partition.unsqueeze
(2), dim=1)
partition = partition + (partition_new - partition) * mask_t
tgt_energy += curr_energy[batch_index, prev_label,
target_transpose[t].data]
prev_label_new = target_transpose[t].data
prev_label = prev_label + (prev_label_new - prev_label
) * mask_t.squeeze(1).long()
return (logsumexp(partition, dim=1) - tgt_energy).mean()
def decode(self, energy, mask):
"""
Args:
energy: Tensor
the energy tensor with shape = [batch, length, num_label, num_label]
mask: Tensor
the mask tensor with shape = [batch, length]
Returns: Tensor
decoding results in shape [batch, length]
"""
energy_transpose = energy.transpose(0, 1)
mask_transpose = mask.unsqueeze(2).transpose(0, 1).long()
energy_transpose = energy_transpose[:, :, :-1, :-1]
length, batch_size, num_label, _ = energy_transpose.size()
batch_index = torch.arange(0, batch_size, dtype=torch.long, device=
energy.device)
pi = torch.zeros([length, batch_size, num_label, 1], device=energy.
device)
pointer = torch.zeros([length, batch_size, num_label], dtype=torch.
long, device=energy.device)
dummy_pointer = torch.arange(self.num_labels - 1, device=energy.device)
back_pointer = torch.zeros([length, batch_size], dtype=torch.long,
device=energy.device)
pi[0] = energy[:, 0, -1, :-1].unsqueeze(2)
pointer[0] = -1
for t in range(1, length):
pi_prev = pi[t - 1]
mask_t = mask_transpose[t]
pi_t, pointer_t = torch.max(energy_transpose[t] + pi_prev, dim=1)
pointer[t] = pointer_t * mask_t + dummy_pointer * (1 - mask_t)
pi[t] = (pi_t * mask_t).unsqueeze(2) + pi[t - 1] * (1 - mask_t.
unsqueeze(2))
_, back_pointer[-1] = torch.max(pi[-1].squeeze(2), dim=1)
for t in reversed(range(length - 1)):
pointer_last = pointer[t + 1]
back_pointer[t] = pointer_last[batch_index, back_pointer[t + 1]]
return back_pointer.transpose(0, 1)
def forward(self, input_0, input_1):
primals_2 = self.state_nn.weight
primals_3 = self.state_nn.bias
primals_4 = self.trans_nn.weight
primals_5 = self.trans_nn.bias
primals_1 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| shabnam-b/crosslingual-nlp | ChainCRF | false | 16,398 | [
"MIT"
] | 64 | ccd91baaea23004eab9c4d871910945ca3e61ab7 | https://github.com/shabnam-b/crosslingual-nlp/tree/ccd91baaea23004eab9c4d871910945ca3e61ab7 |
SoftAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vf/cvfa3cofzrcnaun6ejxtaivamsfl7ovqh6u4hauzn6irdxuskl6i.py
# Topologically Sorted Source Nodes: [e], Original ATen: [aten.mv]
# Source node to ATen node mapping:
# e => mul, sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %primals_4), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mv_0 = async_compile.triton('triton_poi_fused_mv_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mv_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2))
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp17 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3))
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp1 = libdevice.tanh(tmp0)
tmp4 = tmp1 * tmp3
tmp6 = libdevice.tanh(tmp5)
tmp9 = tmp6 * tmp8
tmp10 = tmp4 + tmp9
tmp12 = libdevice.tanh(tmp11)
tmp15 = tmp12 * tmp14
tmp16 = tmp10 + tmp15
tmp18 = libdevice.tanh(tmp17)
tmp21 = tmp18 * tmp20
tmp22 = tmp16 + tmp21
tl.store(out_ptr0 + (x0), tmp22, xmask)
''', device_str='cuda')
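# The kernel above fuses tanh with the length-4 matrix-vector product, i.e.
# e = tanh(W(values)) @ v evaluated rowwise. A hedged eager-mode equivalent
# (a sketch, not generated code):
def _mv_reference(weights, v):
    return torch.tanh(weights) @ v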
# kernel path: runs/run_shard_0/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => div, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
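# The two kernels above are the usual numerically stable softmax split: the
# first subtracts the row max and exponentiates, the second normalizes by the
# row sum. A hedged eager-mode equivalent of the pair (a sketch, not
# generated code):
def _softmax_reference(e):
    shifted = (e - e.max(dim=-1, keepdim=True).values).exp()
    return shifted / shifted.sum(dim=-1, keepdim=True)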
# kernel path: runs/run_shard_0/inductor_cache/uc/cucxlvpmdnkvrjowwgishm572wj6r3vegfneh3wrtn5kchw2jp55.py
# Topologically Sorted Source Nodes: [weighted, sum_1, representations], Original ATen: [aten.mul, aten.sum, aten.squeeze]
# Source node to ATen node mapping:
# representations => squeeze_1
# sum_1 => sum_3
# weighted => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_1, %expand), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2]), kwargs = {})
# %squeeze_1 : [num_users=1] = call_function[target=torch.ops.aten.squeeze.default](args = (%sum_3,), kwargs = {})
triton_poi_fused_mul_squeeze_sum_3 = async_compile.triton('triton_poi_fused_mul_squeeze_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_squeeze_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_squeeze_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')
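# The kernel above fuses the elementwise weighting with the reduction over the
# sequence axis, matching weighted.sum(2) in the eager module. A hedged
# eager-mode equivalent (a sketch, not generated code):
def _weighted_sum_reference(values, attention_weights):
    weighted = values.transpose(1, 2) * attention_weights.unsqueeze(1)
    return weighted.sum(2)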
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [e], Original ATen: [aten.mv]
stream0 = get_raw_stream(0)
triton_poi_fused_mv_0.run(buf0, primals_4, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [weighted, sum_1, representations], Original ATen: [aten.mul, aten.sum, aten.squeeze]
triton_poi_fused_mul_squeeze_sum_3.run(primals_1, buf3, buf4, 64, grid=grid(64), stream=stream0)
del buf3
return (buf4, primals_1, primals_4, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class SoftAttention(nn.Module):
"""
https://arxiv.org/abs/1803.10916
"""
def __init__(self, emb_dim, attn_dim):
super().__init__()
self.attn_dim = attn_dim
self.emb_dim = emb_dim
self.W = torch.nn.Linear(self.emb_dim, self.attn_dim)
self.v = nn.Parameter(torch.Tensor(self.attn_dim), requires_grad=True)
stdv = 1.0 / np.sqrt(self.attn_dim)
for weight in self.v:
nn.init.uniform_(weight, -stdv, stdv)
def forward(self, values):
attention_weights = self._get_weights(values)
values = values.transpose(1, 2)
weighted = torch.mul(values, attention_weights.unsqueeze(1).
expand_as(values))
representations = weighted.sum(2).squeeze()
return representations
def _get_weights(self, values):
values.size(0)
weights = self.W(values)
weights = torch.tanh(weights)
e = weights @ self.v
attention_weights = torch.softmax(e.squeeze(1), dim=-1)
return attention_weights
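# A minimal usage sketch (hypothetical helper, not part of the original
# module), mirroring the shapes used by get_inputs() below:
def _soft_attention_example():
    attn = SoftAttention(emb_dim=4, attn_dim=4)
    values = torch.rand(4, 4, 4, 4)
    return attn(values)  # attention-weighted representations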
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'emb_dim': 4, 'attn_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + 2)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + 3)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp1 = libdevice.tanh(tmp0)
tmp4 = tmp1 * tmp3
tmp6 = libdevice.tanh(tmp5)
tmp9 = tmp6 * tmp8
tmp10 = tmp4 + tmp9
tmp12 = libdevice.tanh(tmp11)
tmp15 = tmp12 * tmp14
tmp16 = tmp10 + tmp15
tmp18 = libdevice.tanh(tmp17)
tmp21 = tmp18 * tmp20
tmp22 = tmp16 + tmp21
tl.store(out_ptr0 + x0, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_squeeze_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_mv_0[grid(64)](buf0, primals_4, buf1, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused_mul_squeeze_sum_3[grid(64)](primals_1, buf3, buf4,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf3
return buf4, primals_1, primals_4, buf0
class SoftAttentionNew(nn.Module):
"""
https://arxiv.org/abs/1803.10916
"""
def __init__(self, emb_dim, attn_dim):
super().__init__()
self.attn_dim = attn_dim
self.emb_dim = emb_dim
self.W = torch.nn.Linear(self.emb_dim, self.attn_dim)
self.v = nn.Parameter(torch.Tensor(self.attn_dim), requires_grad=True)
stdv = 1.0 / np.sqrt(self.attn_dim)
for weight in self.v:
nn.init.uniform_(weight, -stdv, stdv)
def _get_weights(self, values):
values.size(0)
weights = self.W(values)
weights = torch.tanh(weights)
e = weights @ self.v
attention_weights = torch.softmax(e.squeeze(1), dim=-1)
return attention_weights
def forward(self, input_0):
primals_3 = self.v
primals_2 = self.W.weight
primals_4 = self.W.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| shangeth/wavencoder | SoftAttention | false | 16,399 | [
"MIT"
] | 56 | cd1a277c2cc44075c9f4506e344b3a725ad5b9fe | https://github.com/shangeth/wavencoder/tree/cd1a277c2cc44075c9f4506e344b3a725ad5b9fe |
TimeStrech | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/z2/cz24gi4s7xzyox5sbvrpri4ak7fy6xgopry5wbciybyk7nkbnbwl.py
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
# Source node to ATen node mapping:
# interpolate => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_4, add_5, add_6, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub_1, sub_2, sub_3, sub_4, sub_5, sub_6
# Graph fragment:
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 1.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 0.5), kwargs = {})
# %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0.0), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = x0
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 + tmp2
tmp17 = tmp16 * tmp4
tmp18 = tmp17 - tmp2
tmp19 = triton_helpers.maximum(tmp18, tmp7)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 + tmp10
tmp22 = triton_helpers.minimum(tmp21, tmp12)
tmp23 = tl.load(in_ptr0 + (tmp22 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (tmp20 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last')
tmp25 = tmp23 - tmp24
tmp26 = tmp20.to(tl.float32)
tmp27 = tmp19 - tmp26
tmp28 = triton_helpers.maximum(tmp27, tmp7)
tmp29 = triton_helpers.minimum(tmp28, tmp4)
tmp30 = tmp25 * tmp29
tmp31 = tmp24 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp20 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp22 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp9.to(tl.float32)
tmp39 = tmp8 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp7)
tmp41 = triton_helpers.minimum(tmp40, tmp4)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + (x4), tmp43, xmask)
''', device_str='cuda')
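# The kernel above inlines bilinear resampling with align_corners=False: each
# output pixel gathers its four neighbours and blends them with clamped
# fractional weights. A hedged eager-mode equivalent of this identity-scale
# case (a sketch, not generated code):
def _bilinear_reference(x):
    import torch.nn.functional as F
    return F.interpolate(x, scale_factor=(1.0, 1.0), mode='bilinear',
        align_corners=False, recompute_scale_factor=True)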
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf2, arg0_1, 256, grid=grid(256), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf2, (4, 1, 4, 4, 4), (64, 64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import random
import torch
from torch import nn
import torch.nn.functional as F
class TimeStrech(nn.Module):
def __init__(self, scale):
super(TimeStrech, self).__init__()
self.scale = scale
def forward(self, x):
mel_size = x.size(-1)
x = F.interpolate(x, scale_factor=(1, self.scale), align_corners=
False, recompute_scale_factor=True, mode='bilinear').squeeze()
if x.size(-1) < mel_size:
noise_length = mel_size - x.size(-1)
random_pos = random.randint(0, x.size(-1)) - noise_length
if random_pos < 0:
random_pos = 0
noise = x[..., random_pos:random_pos + noise_length]
x = torch.cat([x, noise], dim=-1)
else:
x = x[..., :mel_size]
return x.unsqueeze(1)
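# A minimal usage sketch (hypothetical sizes, not part of the original
# module): stretch the time axis of a mel batch, then crop (or pad with a
# random slice) back to the original number of frames.
def _time_strech_example():
    stretch = TimeStrech(scale=1.25)
    mel = torch.rand(4, 1, 80, 100)  # [batch, channel, n_mels, frames]
    return stretch(mel).shape  # time axis restored to 100 frames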
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale': 1.0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = x0
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 + tmp2
tmp17 = tmp16 * tmp4
tmp18 = tmp17 - tmp2
tmp19 = triton_helpers.maximum(tmp18, tmp7)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 + tmp10
tmp22 = triton_helpers.minimum(tmp21, tmp12)
tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp25 = tmp23 - tmp24
tmp26 = tmp20.to(tl.float32)
tmp27 = tmp19 - tmp26
tmp28 = triton_helpers.maximum(tmp27, tmp7)
tmp29 = triton_helpers.minimum(tmp28, tmp4)
tmp30 = tmp25 * tmp29
tmp31 = tmp24 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp9 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp9 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp9.to(tl.float32)
tmp39 = tmp8 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp7)
tmp41 = triton_helpers.minimum(tmp40, tmp4)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + x4, tmp43, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(256)](buf2, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf2, (4, 1, 4, 4, 4), (64, 64, 16, 4, 1), 0),
class TimeStrechNew(nn.Module):
def __init__(self, scale):
super(TimeStrechNew, self).__init__()
self.scale = scale
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| shaun95/StarGANv2-VC | TimeStrech | false | 16,400 | [
"MIT"
] | 116 | ed20538971a03d699351a349a3631767333baeb7 | https://github.com/shaun95/StarGANv2-VC/tree/ed20538971a03d699351a349a3631767333baeb7 |
ConvEncoder3D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/y3/cy3fg3twnrknr5mjnhabp7jbyk2kjbnvec345wejszxzqi4gvbvq.py
# Topologically Sorted Source Nodes: [net], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# net => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4096
x1 = (xindex // 4096) % 3
x2 = (xindex // 12288) % 3
x3 = (xindex // 36864)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4096*x2) + (12288*x1) + (36864*x3)), None)
tl.store(out_ptr0 + (x4), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/u7/cu7vt3m5gl7iwemaheqiu5hdhl5wgokkvgjbhid6dwe27cawif7w.py
# Topologically Sorted Source Nodes: [net, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# net => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 393216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 3072) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vd/cvdb2pqvjvzobqaw4di5vynhhmn7qfo4zlm5dqfxpafrkpcmtc75.py
# Topologically Sorted Source Nodes: [net_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# net_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 512) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qs/cqsksyy2erxzg3k5ruat4wgc76uug7ofwwxv2xyxhqvqldlzczak.py
# Topologically Sorted Source Nodes: [net_2, relu_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# net_2 => convolution_2
# relu_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 128) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ey/ceydargzeq5vvn7j3vkhdrse7fnvywlyuntcacdesojmdop2xve7.py
# Topologically Sorted Source Nodes: [net_3, relu_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# net_3 => convolution_3
# relu_3 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/u7/cu7l3awf5agkqaopycpbxf2vaafkaocqm557toaofw3pq6mefb2o.py
# Topologically Sorted Source Nodes: [net_4, relu_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# net_4 => convolution_4
# relu_4 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [2, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7i/c7itdndukfgx3t4ude4otmaulm5pppgvhj3fy2s6svsn6bivzwr7.py
# Topologically Sorted Source Nodes: [net_6, relu_5], Original ATen: [aten.mean, aten.relu]
# Source node to ATen node mapping:
# net_6 => mean
# relu_5 => relu_5
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [2]), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mean,), kwargs = {})
triton_poi_fused_mean_relu_6 = async_compile.triton('triton_poi_fused_mean_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
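# the divisor below is 1.0: after conv5 the (1, 1, 1) volume flattens to a
# single element, so the mean over dim 2 is a pass-through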
tmp3 = 1.0
tmp4 = tmp2 / tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(in_out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
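# Eager-mode reference for the fused bias + mean + ReLU above (a sketch to aid
# reading the kernel; `_mean_relu_ref` is introduced here for illustration and
# is never called by the compiled module):
def _mean_relu_ref(conv_out, bias):
    # conv_out: (N, 512, 1, 1, 1) from the last conv; bias: (512,)
    net = (conv_out + bias.view(1, -1, 1, 1, 1)).reshape(conv_out.size(0), 512, -1)
    return torch.relu(net.mean(2))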
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 3, 64, 64), (36864, 12288, 4096, 64, 1))
assert_size_stride(primals_2, (32, 3, 3, 3, 3), (81, 27, 9, 3, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (64, 32, 3, 3, 3), (864, 27, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3, 3), (1728, 27, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (256, 128, 3, 3, 3), (3456, 27, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (512, 256, 3, 3, 3), (6912, 27, 9, 3, 1))
assert_size_stride(primals_11, (512, ), (1, ))
assert_size_stride(primals_12, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_13, (512, ), (1, ))
assert_size_stride(primals_14, (128, 512), (512, 1))
assert_size_stride(primals_15, (128, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 3, 64, 64), (36864, 12288, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [net], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 147456, grid=grid(147456), stream=stream0)
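# buf0 now holds primals_1 with dims 1 and 2 exchanged (the x.transpose(1, 2)
# from the eager forward), materialized contiguously for the conv below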
# Topologically Sorted Source Nodes: [net], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 3, 32, 32), (98304, 3072, 1024, 32, 1))
del buf0
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [net, relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 393216, grid=grid(393216), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [net_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 64, 2, 16, 16), (32768, 512, 256, 16, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [net_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf4, primals_5, 131072, grid=grid(131072), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [net_2], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 128, 2, 8, 8), (16384, 128, 64, 8, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [net_2, relu_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf6, primals_7, 65536, grid=grid(65536), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [net_3], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_8, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 256, 1, 4, 4), (4096, 16, 16, 4, 1))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [net_3, relu_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf8, primals_9, 16384, grid=grid(16384), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [net_4], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_10, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 512, 1, 2, 2), (2048, 4, 4, 2, 1))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [net_4, relu_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf10, primals_11, 8192, grid=grid(8192), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [net_5], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_12, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 512, 1, 1, 1), (512, 1, 1, 1, 1))
buf12 = reinterpret_tensor(buf11, (4, 512), (512, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [net_6, relu_5], Original ATen: [aten.mean, aten.relu]
triton_poi_fused_mean_relu_6.run(buf12, primals_13, 2048, grid=grid(2048), stream=stream0)
del primals_13
buf13 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, buf12, reinterpret_tensor(primals_14, (512, 128), (1, 512), 0), alpha=1, beta=1, out=buf13)
del primals_15
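# buf13 is the (4, 128) embedding; the remaining outputs are tensors kept
# alive for the backward pass (weights, the permuted input view, and each
# post-ReLU activation)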
return (buf13, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, reinterpret_tensor(primals_1, (4, 3, 3, 64, 64), (36864, 4096, 12288, 64, 1), 0), buf2, buf4, buf6, buf8, buf10, buf12, primals_14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 3, 64, 64), (36864, 12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 3, 3, 3, 3), (81, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 3, 3, 3), (864, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3, 3), (1728, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 128, 3, 3, 3), (3456, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((512, 256, 3, 3, 3), (6912, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((512, 512, 3, 3, 3), (13824, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((128, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from matplotlib import cm as cm
import torch.nn as nn
class ConvEncoder3D(nn.Module):
""" Simple convolutional conditioning network.
It consists of 6 convolutional layers, each downsampling the input by a
factor of 2, and a final fully-connected layer projecting the output to
c_dim dimensions.
"""
def __init__(self, c_dim=128, hidden_dim=32, **kwargs):
""" Initialisation.
Args:
c_dim (int): output dimension of the latent embedding
"""
super().__init__()
self.conv0 = nn.Conv3d(3, hidden_dim, 3, stride=(1, 2, 2), padding=1)
self.conv1 = nn.Conv3d(hidden_dim, hidden_dim * 2, 3, stride=(2, 2,
2), padding=1)
self.conv2 = nn.Conv3d(hidden_dim * 2, hidden_dim * 4, 3, stride=(1,
2, 2), padding=1)
self.conv3 = nn.Conv3d(hidden_dim * 4, hidden_dim * 8, 3, stride=(2,
2, 2), padding=1)
self.conv4 = nn.Conv3d(hidden_dim * 8, hidden_dim * 16, 3, stride=(
2, 2, 2), padding=1)
self.conv5 = nn.Conv3d(hidden_dim * 16, hidden_dim * 16, 3, stride=
(2, 2, 2), padding=1)
self.fc_out = nn.Linear(hidden_dim * 16, c_dim)
self.actvn = nn.ReLU()
def forward(self, x):
x = x.transpose(1, 2)
batch_size = x.size(0)
net = self.conv0(x)
net = self.conv1(self.actvn(net))
net = self.conv2(self.actvn(net))
net = self.conv3(self.actvn(net))
net = self.conv4(self.actvn(net))
net = self.conv5(self.actvn(net))
final_dim = net.shape[1]
net = net.view(batch_size, final_dim, -1).mean(2)
out = self.fc_out(self.actvn(net))
return out
def get_inputs():
return [torch.rand([4, 3, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
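# Shape walk-through for the (4, 3, 3, 64, 64) test input (a sketch; shapes
# are the ones asserted in the compiled graph):
# transpose(1, 2) -> (4, 3, 3, 64, 64)  (dims 1 and 2 are both 3 here)
# conv0 -> (4, 32, 3, 32, 32)    conv1 -> (4, 64, 2, 16, 16)
# conv2 -> (4, 128, 2, 8, 8)     conv3 -> (4, 256, 1, 4, 4)
# conv4 -> (4, 512, 1, 2, 2)     conv5 -> (4, 512, 1, 1, 1)
# view + mean(2) -> (4, 512)     fc_out -> (4, 128)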
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from matplotlib import cm as cm
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
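# The first kernel below is a pure copy: it exchanges the two size-3 dims of
# the input (note the swapped 4096/12288 strides in the load), which is the
# x.transpose(1, 2) from the eager forward made contiguous.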
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
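# the next line is a no-op: it is the always-true xmask of the original
# kernel, left here as an unassigned expression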
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4096
x1 = xindex // 4096 % 3
x2 = xindex // 12288 % 3
x3 = xindex // 36864
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * x2 + 12288 * x1 + 36864 * x3), None)
tl.store(out_ptr0 + x4, tmp0, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3072 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 512 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 128 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_mean_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 / tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(in_out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 3, 64, 64), (36864, 12288, 4096,
64, 1))
assert_size_stride(primals_2, (32, 3, 3, 3, 3), (81, 27, 9, 3, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (64, 32, 3, 3, 3), (864, 27, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3, 3), (1728, 27, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (256, 128, 3, 3, 3), (3456, 27, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (512, 256, 3, 3, 3), (6912, 27, 9, 3, 1))
assert_size_stride(primals_11, (512,), (1,))
assert_size_stride(primals_12, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_13, (512,), (1,))
assert_size_stride(primals_14, (128, 512), (512, 1))
assert_size_stride(primals_15, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 3, 64, 64), (36864, 12288, 4096,
64, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(147456)](primals_1, buf0,
147456, XBLOCK=1024, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 2, 2),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 3, 32, 32), (98304, 3072, 1024, 32, 1))
del buf0
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(393216)](buf2, primals_3,
393216, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2, 2),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 64, 2, 16, 16), (32768, 512, 256, 16, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_relu_2[grid(131072)](buf4, primals_5,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 2, 2),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 128, 2, 8, 8), (16384, 128, 64, 8, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_3[grid(65536)](buf6, primals_7,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf7 = extern_kernels.convolution(buf6, primals_8, stride=(2, 2, 2),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 256, 1, 4, 4), (4096, 16, 16, 4, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_4[grid(16384)](buf8, primals_9,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf9 = extern_kernels.convolution(buf8, primals_10, stride=(2, 2,
2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 512, 1, 2, 2), (2048, 4, 4, 2, 1))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_5[grid(8192)](buf10, primals_11,
8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf11 = extern_kernels.convolution(buf10, primals_12, stride=(2, 2,
2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 512, 1, 1, 1), (512, 1, 1, 1, 1))
buf12 = reinterpret_tensor(buf11, (4, 512), (512, 1), 0)
del buf11
triton_poi_fused_mean_relu_6[grid(2048)](buf12, primals_13, 2048,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf13 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.addmm(primals_15, buf12, reinterpret_tensor(
primals_14, (512, 128), (1, 512), 0), alpha=1, beta=1, out=buf13)
del primals_15
return (buf13, primals_2, primals_4, primals_6, primals_8, primals_10,
primals_12, reinterpret_tensor(primals_1, (4, 3, 3, 64, 64), (36864,
4096, 12288, 64, 1), 0), buf2, buf4, buf6, buf8, buf10, buf12,
primals_14)
class ConvEncoder3DNew(nn.Module):
""" Simple convolutional conditioning network.
It consists of 6 convolutional layers, each downsampling the input by a
factor of 2, and a final fully-connected layer projecting the output to
c_dim dimensions.
"""
def __init__(self, c_dim=128, hidden_dim=32, **kwargs):
""" Initialisation.
Args:
c_dim (int): output dimension of the latent embedding
"""
super().__init__()
self.conv0 = nn.Conv3d(3, hidden_dim, 3, stride=(1, 2, 2), padding=1)
self.conv1 = nn.Conv3d(hidden_dim, hidden_dim * 2, 3, stride=(2, 2,
2), padding=1)
self.conv2 = nn.Conv3d(hidden_dim * 2, hidden_dim * 4, 3, stride=(1,
2, 2), padding=1)
self.conv3 = nn.Conv3d(hidden_dim * 4, hidden_dim * 8, 3, stride=(2,
2, 2), padding=1)
self.conv4 = nn.Conv3d(hidden_dim * 8, hidden_dim * 16, 3, stride=(
2, 2, 2), padding=1)
self.conv5 = nn.Conv3d(hidden_dim * 16, hidden_dim * 16, 3, stride=
(2, 2, 2), padding=1)
self.fc_out = nn.Linear(hidden_dim * 16, c_dim)
self.actvn = nn.ReLU()
def forward(self, input_0):
primals_2 = self.conv0.weight
primals_3 = self.conv0.bias
primals_4 = self.conv1.weight
primals_5 = self.conv1.bias
primals_6 = self.conv2.weight
primals_7 = self.conv2.bias
primals_8 = self.conv3.weight
primals_9 = self.conv3.bias
primals_10 = self.conv4.weight
primals_11 = self.conv4.bias
primals_12 = self.conv5.weight
primals_13 = self.conv5.bias
primals_14 = self.fc_out.weight
primals_15 = self.fc_out.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
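# ConvEncoder3DNew.forward routes the module parameters straight into call();
# output[0] is the (batch, c_dim) embedding, and the extra tensors saved for
# backward are dropped.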
| ray8828/occupancy_flow | ConvEncoder3D | false | 16,401 | ["MIT"] | 146 | 09c172262bb151895d450eb323e2383a5c88841c | https://github.com/ray8828/occupancy_flow/tree/09c172262bb151895d450eb323e2383a5c88841c |
PitchShift | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/hs/chsm3xxgp6i2y2r5exj6x4olna3lp6zaw2xkhnoilz6r64o5gs4l.py
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
# Source node to ATen node mapping:
# interpolate => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_4, add_5, add_6, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub_1, sub_2, sub_3, sub_4, sub_5, sub_6
# Graph fragment:
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 1.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 0.5), kwargs = {})
# %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0.0), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%unsqueeze, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%unsqueeze, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%unsqueeze, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%unsqueeze, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 8
x0 = xindex % 4
x2 = (xindex // 32)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tmp13 = x0
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 + tmp2
tmp16 = 1.0
tmp17 = tmp15 * tmp16
tmp18 = tmp17 - tmp2
tmp19 = triton_helpers.maximum(tmp18, tmp6)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 + tmp9
tmp22 = triton_helpers.minimum(tmp21, tmp11)
tmp23 = tl.load(in_ptr0 + (tmp22 + (4*tmp12) + (16*x2)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (tmp20 + (4*tmp12) + (16*x2)), xmask, eviction_policy='evict_last')
tmp25 = tmp23 - tmp24
tmp26 = tmp20.to(tl.float32)
tmp27 = tmp19 - tmp26
tmp28 = triton_helpers.maximum(tmp27, tmp6)
tmp29 = triton_helpers.minimum(tmp28, tmp16)
tmp30 = tmp25 * tmp29
tmp31 = tmp24 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp20 + (4*tmp8) + (16*x2)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp22 + (4*tmp8) + (16*x2)), xmask, eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp8.to(tl.float32)
tmp39 = tmp7 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp6)
tmp41 = triton_helpers.minimum(tmp40, tmp16)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + (x4), tmp43, xmask)
''', device_str='cuda')
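# Eager-mode reference for the kernel above (a sketch for reading it;
# `_bilinear_ref` is introduced here for illustration and is never called by
# the compiled module):
def _bilinear_ref(x):
    # x: (4, 1, 4, 4); the kernel performs the same half-pixel gather/lerp
    import torch.nn.functional as F
    return F.interpolate(x, size=(8, 4), mode='bilinear', align_corners=False)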
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 8, 4), (32, 128, 4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
buf2 = reinterpret_tensor(buf1, (4, 1, 8, 4), (32, 32, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf2, arg0_1, 128, grid=grid(128), stream=stream0)
del arg0_1
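# the reinterpret below crops the (4, 1, 8, 4) result to its first 4 mel rows
# (the x[:, :mel_size] slice in the eager forward) without copying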
return (reinterpret_tensor(buf2, (4, 1, 4, 4), (32, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class PitchShift(nn.Module):
def __init__(self, shift):
super(PitchShift, self).__init__()
self.shift = shift
def forward(self, x):
if len(x.shape) == 2:
x = x.unsqueeze(0)
x = x.squeeze()
mel_size = x.shape[1]
shift_scale = (mel_size + self.shift) / mel_size
x = F.interpolate(x.unsqueeze(1), scale_factor=(shift_scale, 1.0),
align_corners=False, recompute_scale_factor=True, mode='bilinear'
).squeeze(1)
x = x[:, :mel_size]
if x.size(1) < mel_size:
pad_size = mel_size - x.size(1)
x = torch.cat([x, torch.zeros(x.size(0), pad_size, x.size(2))],
dim=1)
x = x.squeeze()
return x.unsqueeze(1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'shift': 4}]
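# Worked example for the (4, 4, 4) test input: mel_size = 4 and shift = 4,
# so shift_scale = (4 + 4) / 4 = 2.0; the mel axis is upsampled 2x by the
# bilinear interpolate and then cropped back to mel_size rows.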
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tmp13 = x0
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 + tmp2
tmp16 = 1.0
tmp17 = tmp15 * tmp16
tmp18 = tmp17 - tmp2
tmp19 = triton_helpers.maximum(tmp18, tmp6)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 + tmp9
tmp22 = triton_helpers.minimum(tmp21, tmp11)
tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp12 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp12 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp25 = tmp23 - tmp24
tmp26 = tmp20.to(tl.float32)
tmp27 = tmp19 - tmp26
tmp28 = triton_helpers.maximum(tmp27, tmp6)
tmp29 = triton_helpers.minimum(tmp28, tmp16)
tmp30 = tmp25 * tmp29
tmp31 = tmp24 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp8 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp8 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp8.to(tl.float32)
tmp39 = tmp7 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp6)
tmp41 = triton_helpers.minimum(tmp40, tmp16)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + x4, tmp43, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 8, 4), (32, 128, 4, 1), torch.float32)
buf1 = buf0
del buf0
buf2 = reinterpret_tensor(buf1, (4, 1, 8, 4), (32, 32, 4, 1), 0)
del buf1
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid(128)](
buf2, arg0_1, 128, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf2, (4, 1, 4, 4), (32, 16, 4, 1), 0),
class PitchShiftNew(nn.Module):
def __init__(self, shift):
super(PitchShiftNew, self).__init__()
self.shift = shift
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| shaun95/StarGANv2-VC | PitchShift | false | 16,402 | ["MIT"] | 116 | ed20538971a03d699351a349a3631767333baeb7 | https://github.com/shaun95/StarGANv2-VC/tree/ed20538971a03d699351a349a3631767333baeb7 |
InjectNoise | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/hy/chy2gdvcdlpxio6r2ezu76sbqj6jaum4snkq6izi5zxkdqh3u2yj.py
# Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %randn), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
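# weight is indexed by channel only (x1) and noise by batch/spatial only
# (x0 + 16*x2): together these loads realize the (1, C, 1, 1) * (N, 1, H, W)
# broadcast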
tmp2 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [noise], Original ATen: [aten.randn]
buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], device=device(type='cuda', index=0), pin_memory=False)
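# the noise draw stays an ATen op call here (RNG is not lowered to Triton);
# only the x + weight * noise arithmetic is compiled into the kernel launched
# below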
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(primals_1, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf2, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
import torch.nn
class InjectNoise(nn.Module):
def __init__(self, channels):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1, channels, 1, 1))
def forward(self, x):
noise = torch.randn((x.shape[0], 1, x.shape[2], x.shape[3]), device
=x.device)
return x + self.weight * noise
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
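# Broadcast sketch for the shapes above: weight (1, 4, 1, 1) * noise
# (4, 1, 4, 4) -> (4, 4, 4, 4), then added elementwise to x (4, 4, 4, 4).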
| import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], device=device(
type='cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf2, buf1
class InjectNoiseNew(nn.Module):
def __init__(self, channels):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1, channels, 1, 1))
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| shimon-c/Machine-Learning-Collection | InjectNoise | false | 16,403 | ["MIT"] | 3,094 | ac5dcd03a40a08a8af7e1a67ade37f28cf88db43 | https://github.com/shimon-c/Machine-Learning-Collection/tree/ac5dcd03a40a08a8af7e1a67ade37f28cf88db43 |
ResBlk | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wo/cwo5hzyj7r5kfs5qkbujhau55erj2h3367t3krgxxma4ysrszby7.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %primals_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vo/cvo56aotw4yuhuax6oyrf43t5ssqhzuwodjmjfylt42bqssid7vq.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/iz/ciznb73q5a3l5esh4sw5ejdw3d2b6uuntjcpgiok5w7sbiizunre.py
# Topologically Sorted Source Nodes: [x_3, x_4, truediv], Original ATen: [aten.convolution, aten.add, aten.div]
# Source node to ATen node mapping:
# truediv => div
# x_3 => convolution_1
# x_4 => add
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %convolution_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 1.4142135623730951), kwargs = {})
triton_poi_fused_add_convolution_div_2 = async_compile.triton('triton_poi_fused_add_convolution_div_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_div_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_out_ptr0 + (x3), xmask)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = 0.7071067811865475
tmp6 = tmp4 * tmp5
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf1, primals_3, buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf1
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3, x_4, truediv], Original ATen: [aten.convolution, aten.add, aten.div]
triton_poi_fused_add_convolution_div_2.run(buf5, primals_1, primals_5, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_5
return (buf5, primals_2, primals_4, buf0, buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
import torch.nn.functional as F
class DownSample(nn.Module):
def __init__(self, layer_type):
super().__init__()
self.layer_type = layer_type
def forward(self, x):
if self.layer_type == 'none':
return x
elif self.layer_type == 'timepreserve':
return F.avg_pool2d(x, (2, 1))
elif self.layer_type == 'half':
return F.avg_pool2d(x, 2)
else:
raise RuntimeError(
                'Got unexpected downsample type %s, expected one of [none, timepreserve, half]'
% self.layer_type)
class ResBlk(nn.Module):
    def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2),
            normalize=False, downsample='none'):
super().__init__()
self.actv = actv
self.normalize = normalize
self.downsample = DownSample(downsample)
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out)
def _build_weights(self, dim_in, dim_out):
self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
if self.normalize:
self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
if self.learned_sc:
self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
def _shortcut(self, x):
if self.learned_sc:
x = self.conv1x1(x)
if self.downsample:
x = self.downsample(x)
return x
def _residual(self, x):
if self.normalize:
x = self.norm1(x)
x = self.actv(x)
x = self.conv1(x)
x = self.downsample(x)
if self.normalize:
x = self.norm2(x)
x = self.actv(x)
x = self.conv2(x)
return x
def forward(self, x):
x = self._shortcut(x) + self._residual(x)
return x / math.sqrt(2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
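# Elementwise LeakyReLU with negative slope 0.2: out = x if x > 0 else 0.2 * x.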
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
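# Fused conv-bias add + LeakyReLU(0.2); also stores the x > 0 mask (out_ptr0),
# which call() keeps around, presumably for reuse in the backward pass.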
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
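# Fused epilogue: conv bias add + residual skip add, with the divide by sqrt(2)
# folded into a multiply by 1/sqrt(2) (0.7071067811865475).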
@triton.jit
def triton_poi_fused_add_convolution_div_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = 0.7071067811865475
tmp6 = tmp4 * tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
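# Full ResBlk forward for this trace (normalize=False, identity shortcut and
# downsample): LeakyReLU -> conv1 -> LeakyReLU -> conv2, then (x + res) / sqrt(2).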
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](primals_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1,
primals_3, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_div_2[grid(256)](buf5, primals_1,
primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_5
return buf5, primals_2, primals_4, buf0, buf2, buf3
class DownSample(nn.Module):
def __init__(self, layer_type):
super().__init__()
self.layer_type = layer_type
def forward(self, x):
if self.layer_type == 'none':
return x
elif self.layer_type == 'timepreserve':
return F.avg_pool2d(x, (2, 1))
elif self.layer_type == 'half':
return F.avg_pool2d(x, 2)
else:
raise RuntimeError(
                'Got unexpected downsample type %s, expected one of [none, timepreserve, half]'
% self.layer_type)
class ResBlkNew(nn.Module):
    def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2),
            normalize=False, downsample='none'):
super().__init__()
self.actv = actv
self.normalize = normalize
self.downsample = DownSample(downsample)
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out)
def _build_weights(self, dim_in, dim_out):
self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
if self.normalize:
self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
if self.learned_sc:
self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
def _shortcut(self, x):
if self.learned_sc:
x = self.conv1x1(x)
if self.downsample:
x = self.downsample(x)
return x
def _residual(self, x):
if self.normalize:
x = self.norm1(x)
x = self.actv(x)
x = self.conv1(x)
x = self.downsample(x)
if self.normalize:
x = self.norm2(x)
x = self.actv(x)
x = self.conv2(x)
return x
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| shaun95/StarGANv2-VC | ResBlk | false | 16,404 | [
"MIT"
] | 116 | ed20538971a03d699351a349a3631767333baeb7 | https://github.com/shaun95/StarGANv2-VC/tree/ed20538971a03d699351a349a3631767333baeb7 |
LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/dg/cdgw6x7nju4bzp2wyuwgeanbco7zcjis6yiusovvnpz6zw3yjd3l.py
# Topologically Sorted Source Nodes: [mean, sub], Original ATen: [aten.mean, aten.sub]
# Source node to ATen node mapping:
# mean => mean
# sub => sub
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lu/cluxtxibe7l27s4qhwuk23gbd42mibotfwkx6ewkc3w23drlzgks.py
# Topologically Sorted Source Nodes: [pow_1, s, add, rsqrt, x, mul_1, add_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul_1 => mul_1
# pow_1 => pow_1
# rsqrt => rsqrt
# s => mean_1
# x => mul
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %mul), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_add_mean_mul_pow_rsqrt_1 = async_compile.triton('triton_poi_fused_add_mean_mul_pow_rsqrt_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_pow_rsqrt_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.rsqrt(tmp16)
tmp18 = tmp1 * tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, sub], Original ATen: [aten.mean, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_sub_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, s, add, rsqrt, x, mul_1, add_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul]
triton_poi_fused_add_mean_mul_pow_rsqrt_1.run(primals_2, buf0, primals_3, buf1, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
del primals_3
return (buf1, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""Norm to 0-mean 1-std , then do a learned diagonal affine transform."""
def __init__(self, features, eps=1e-05):
super(LayerNorm, self).__init__()
self.scale = nn.Parameter(torch.ones(features))
self.shift = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
s = (x - mean).pow(2).mean(-1, keepdim=True)
x = (x - mean) * torch.rsqrt(s + self.eps)
return self.scale * x + self.shift
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'features': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
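# First pass: per-row mean over the last dim (size 4, via four explicit loads),
# then subtract it from every element.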
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
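# Second pass: variance of the centered rows, rsqrt(var + 1e-05), then the
# affine transform scale * x_hat + shift, all in one fused kernel.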
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.rsqrt(tmp16)
tmp18 = tmp1 * tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mean_mul_pow_rsqrt_1[grid(256)](primals_2,
buf0, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
del primals_3
return buf1, primals_1
class LayerNormNew(nn.Module):
"""Norm to 0-mean 1-std , then do a learned diagonal affine transform."""
def __init__(self, features, eps=1e-05):
super(LayerNormNew, self).__init__()
self.scale = nn.Parameter(torch.ones(features))
self.shift = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, input_0):
primals_2 = self.scale
primals_3 = self.shift
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| shenyunlong/naru | LayerNorm | false | 16,405 | [
"Apache-2.0"
] | 70 | 264cf4e9c96c9e34422f9eebc455a714aeef0b57 | https://github.com/shenyunlong/naru/tree/264cf4e9c96c9e34422f9eebc455a714aeef0b57 |
AdaIN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qe/cqeosomkjl2wyhgautbj64zvn2r6slesq4xiplyeynuljwzp4fnz.py
# Topologically Sorted Source Nodes: [add, instance_norm, mul, add_1], Original ATen: [aten.add, aten._native_batch_norm_legit, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_2
# instance_norm => add_1, rsqrt, var_mean
# mul => mul_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %view_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %getitem_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_mul_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr1 + (4 + x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = 1.0
tmp26 = tmp24 + tmp25
tmp27 = tmp0 - tmp10
tmp28 = tmp27 * tmp21
tmp29 = tmp26 * tmp28
tmp32 = tmp30 + tmp31
tmp33 = tmp29 + tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (16*x0)), tmp33, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, instance_norm, mul, add_1], Original ATen: [aten.add, aten._native_batch_norm_legit, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0.run(buf4, primals_4, buf0, primals_2, buf1, buf5, 16, 16, grid=grid(16), stream=stream0)
del buf0
del primals_2
return (buf5, primals_3, primals_4, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class AdaIN(nn.Module):
def __init__(self, style_dim, num_features):
super().__init__()
self.norm = nn.InstanceNorm2d(num_features, affine=False)
self.fc = nn.Linear(style_dim, num_features * 2)
def forward(self, x, s):
h = self.fc(s)
h = h.view(h.size(0), h.size(1), 1, 1)
gamma, beta = torch.chunk(h, chunks=2, dim=1)
return (1 + gamma) * self.norm(x) + beta
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'style_dim': 4, 'num_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
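# One program per (sample, channel) pair: reduces over the 16 spatial elements
# to get the instance-norm mean and rsqrt(var + eps), then applies
# (1 + gamma) * x_hat + beta, where gamma and beta are the two halves of fc(s)
# (the fc bias is fused in here; the mm in call() runs without it).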
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = 1.0
tmp26 = tmp24 + tmp25
tmp27 = tmp0 - tmp10
tmp28 = tmp27 * tmp21
tmp29 = tmp26 * tmp28
tmp32 = tmp30 + tmp31
tmp33 = tmp29 + tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp33, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0[grid(16)](buf4,
primals_4, buf0, primals_2, buf1, buf5, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, primals_4, buf1, buf4
class AdaINNew(nn.Module):
def __init__(self, style_dim, num_features):
super().__init__()
self.norm = nn.InstanceNorm2d(num_features, affine=False)
self.fc = nn.Linear(style_dim, num_features * 2)
def forward(self, input_0, input_1):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| shaun95/StarGANv2-VC | AdaIN | false | 16,406 | [
"MIT"
] | 116 | ed20538971a03d699351a349a3631767333baeb7 | https://github.com/shaun95/StarGANv2-VC/tree/ed20538971a03d699351a349a3631767333baeb7 |
WSConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/of/cofwklo5nuhcnwyox2iqrwvifqkquaz5nm5gngbpzzb6xu4usmqd.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.23570226039551584), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.23570226039551584
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/at/catyivxdqgliwunkrygqmudtr3fumc4lgfzxwaogch2z5s2rnn7a.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %view), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
import torch.nn
class WSConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=1, gain=2):
super(WSConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding)
self.scale = (gain / (in_channels * kernel_size ** 2)) ** 0.5
self.bias = self.conv.bias
self.conv.bias = None
nn.init.normal_(self.conv.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
        return self.conv(x * self.scale) + self.bias.view(1, self.bias.shape[0], 1, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
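# Pre-scales the input by the equalized-LR factor
# sqrt(gain / (in_channels * kernel_size**2)) = sqrt(2 / (4 * 9)) ~= 0.23570226039551584.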
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.23570226039551584
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
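# In-place per-channel bias add, broadcast over the batch and spatial dims.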
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_add_1[grid(256)](buf2, primals_3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class WSConv2dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=1, gain=2):
super(WSConv2dNew, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding)
self.scale = (gain / (in_channels * kernel_size ** 2)) ** 0.5
self.bias = self.conv.bias
self.conv.bias = None
nn.init.normal_(self.conv.weight)
nn.init.zeros_(self.bias)
def forward(self, input_0):
primals_3 = self.bias
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| shimon-c/Machine-Learning-Collection | WSConv2d | false | 16,407 | [
"MIT"
] | 3,094 | ac5dcd03a40a08a8af7e1a67ade37f28cf88db43 | https://github.com/shimon-c/Machine-Learning-Collection/tree/ac5dcd03a40a08a8af7e1a67ade37f28cf88db43 |
WSLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fw/cfwvbe4gstgcn6dzjodlg3ivy5emtb773ndnmu54q3hlno3avnrq.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.7071067811865476), kwargs = {})
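# Note: 0.7071067811865476 == 1/sqrt(2); mirroring WSConv2d above, this is
# presumably the scale sqrt(gain / in_features) = sqrt(2 / 4) baked in at trace time.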
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/t6/ct6f57cdvyh3ahq6iwyawuy7577bar2ftumjxqllolmn4c4lh7ph.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
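# Sketch of the in-place bias add above: x0 = xindex % 4 broadcasts a
# length-4 bias over the trailing dimension of the (64, 4) matmul result.
# Eager equivalent (shapes are those asserted in call below):
def _add_1_reference(out, bias):
    return out + bias  # out: (64, 4), bias: (4,); broadcasting matches x0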
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), )
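# Illustrative parity check (an assumption, not part of the generated module):
# the compiled call should agree with the eager formula linear(x * scale) + bias.
def _check_call_against_eager(x, weight, bias, atol=1e-5):
    expected = torch.nn.functional.linear(x * 0.7071067811865476, weight) + bias
    actual = call([x.clone(), weight.clone(), bias.clone()])[0]
    return torch.allclose(actual, expected, atol=atol)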
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
import torch.nn
class WSLinear(nn.Module):
def __init__(self, in_features, out_features, gain=2):
super(WSLinear, self).__init__()
self.linear = nn.Linear(in_features, out_features)
self.scale = (gain / in_features) ** 0.5
self.bias = self.linear.bias
self.linear.bias = None
nn.init.normal_(self.linear.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
return self.linear(x * self.scale) + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(256)](buf2, primals_3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class WSLinearNew(nn.Module):
def __init__(self, in_features, out_features, gain=2):
super(WSLinearNew, self).__init__()
self.linear = nn.Linear(in_features, out_features)
self.scale = (gain / in_features) ** 0.5
self.bias = self.linear.bias
self.linear.bias = None
nn.init.normal_(self.linear.weight)
nn.init.zeros_(self.bias)
def forward(self, input_0):
primals_3 = self.bias
primals_2 = self.linear.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
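# Usage sketch (an assumption; requires a CUDA device): WSLinearNew is a
# drop-in replacement for the eager WSLinear above.
#     m = WSLinearNew(4, 4).cuda()
#     y = m(torch.rand(4, 4, 4, 4, device='cuda'))  # y.shape == (4, 4, 4, 4)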
| shimon-c/Machine-Learning-Collection | WSLinear | false | 16,408 | [
"MIT"
] | 3,094 | ac5dcd03a40a08a8af7e1a67ade37f28cf88db43 | https://github.com/shimon-c/Machine-Learning-Collection/tree/ac5dcd03a40a08a8af7e1a67ade37f28cf88db43 |
WSConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/kz/ckzysfnmjdhibj6542hl4gprkyaefndj5yyitfjms33ia7bbwcst.py
# Topologically Sorted Source Nodes: [weight, mul, weight_1], Original ATen: [aten.native_layer_norm, aten.mul]
# Source node to ATen node mapping:
# mul => mul_1
# weight => add, mul, rsqrt, sub, var_mean
# weight_1 => mul_2
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0000000000000002e-10), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 0.125), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %mul), kwargs = {})
triton_per_fused_mul_native_layer_norm_0 = async_compile.triton('triton_per_fused_mul_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_native_layer_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1.0000000000000002e-10
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp23 = 0.125
tmp24 = tmp22 * tmp23
tmp25 = tmp0 - tmp10
tmp26 = tmp25 * tmp21
tmp27 = tmp24 * tmp26
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (64*x0)), tmp27, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
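# Eager-mode sketch of the fused statistics above (an illustration, not
# generated code): a per-output-channel layer norm of the (4, 4, 4, 4) weight
# followed by gain * scale, with scale = fan_in ** -0.5 = 64 ** -0.5 = 0.125
# and eps = (1e-05) ** 2, i.e. the 1.0000000000000002e-10 constant above.
def _standardize_weight_reference(weight, gain):
    w = torch.nn.functional.layer_norm(weight, weight.shape[1:], eps=1e-10)
    return gain.view(-1, 1, 1, 1) * 0.125 * w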
# kernel path: runs/run_shard_0/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %mul_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
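# The kernel above only folds the (4,)-shaped convolution bias into the
# (4, 4, 1, 1) result in place; the eager equivalent is
#     out = out + bias.view(1, -1, 1, 1)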
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight, mul, weight_1], Original ATen: [aten.native_layer_norm, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_mul_native_layer_norm_0.run(buf3, primals_1, primals_2, buf0, buf4, 4, 64, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_4, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf6, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
return (buf6, primals_1, primals_2, primals_4, buf0, buf3, buf4, )
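# Illustrative parity check (an assumption, not generated code). Per the
# kernels above, the second argument is scaled by 0.125 and folded into the
# standardized weight, and the third is added as the convolution bias.
def _check_wsconv_call(weight, gain, bias, x, atol=1e-4):
    w = torch.nn.functional.layer_norm(weight, weight.shape[1:], eps=1e-10)
    w = gain.view(-1, 1, 1, 1) * 0.125 * w
    expected = torch.nn.functional.conv2d(x, w, bias)
    actual = call([weight.clone(), gain.clone(), bias.clone(), x.clone()])[0]
    return torch.allclose(actual, expected, atol=atol)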
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
class WSConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, eps=1e-05):
super().__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
self.gain = nn.Parameter(torch.ones(out_channels))
self.eps = eps ** 2
fan_in = torch.numel(self.weight[0])
self.scale = fan_in ** -0.5
nn.init.kaiming_normal_(self.weight, nonlinearity='linear')
def forward(self, input):
weight = F.layer_norm(self.weight, self.weight.shape[1:], eps=self.eps)
gain = self.gain.view([-1] + [1] * (weight.ndim - 1))
weight = gain * self.scale * weight
return F.conv2d(input, weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mul_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1.0000000000000002e-10
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp23 = 0.125
tmp24 = tmp22 * tmp23
tmp25 = tmp0 - tmp10
tmp26 = tmp25 * tmp21
tmp27 = tmp24 * tmp26
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 64 * x0), tmp27, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
                                   XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_native_layer_norm_0[grid(4)](buf3, primals_1,
primals_2, buf0, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = extern_kernels.convolution(primals_4, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_1[grid(16)](buf6, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf6, primals_1, primals_2, primals_4, buf0, buf3, buf4
class WSConv2dNew(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, eps=1e-05):
super().__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
self.gain = nn.Parameter(torch.ones(out_channels))
self.eps = eps ** 2
fan_in = torch.numel(self.weight[0])
self.scale = fan_in ** -0.5
nn.init.kaiming_normal_(self.weight, nonlinearity='linear')
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = self.gain
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| rosinality/vision-transformers-pytorch | WSConv2d | false | 16,409 | [
"MIT"
] | 77 | b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f | https://github.com/rosinality/vision-transformers-pytorch/tree/b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f |
DQN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ky/cky64l574tkwxzjewzevqyhty73x4t3q4p6d2tu2humfvstjwiaa.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
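# Eager sketch of the fused op above: bias add, ReLU, plus the <= 0 mask that
# autograd's threshold_backward later consumes.
def _relu_with_mask_reference(x, bias):
    out = torch.relu(x + bias)  # bias: (32,), broadcast over the last dim
    return out, out <= 0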
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 32), (32, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 2048, grid=grid(2048), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), primals_4, buf3, )
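# Illustrative check (an assumption; requires CUDA): the compiled call should
# match the eager two-layer MLP fc2(relu(fc1(x))).
def _check_dqn_call(w1, b1, x, w2, b2, atol=1e-5):
    hidden = torch.relu(torch.nn.functional.linear(x, w1, b1))
    expected = torch.nn.functional.linear(hidden, w2, b2)
    actual = call([w1.clone(), b1.clone(), x.clone(), w2.clone(), b2.clone()])[0]
    return torch.allclose(actual, expected, atol=atol)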
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import namedtuple

# The names below are used by DQN.action/push but were not defined in this
# snippet; these placeholder values are assumptions that make it self-contained.
INITIAL_EPSILON = 1.0
FINAL_EPSILON = 0.01
longTensor = torch.LongTensor
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))
class DQN(nn.Module):
def __init__(self, state_dim, out_dim, capacity, bsz, epsilon):
super().__init__()
self.steps_done = 0
self.position = 0
self.pool = []
self.capacity = capacity
self.bsz = bsz
self.epsilon = epsilon
self.fc1 = nn.Linear(state_dim, 32)
self.fc2 = nn.Linear(32, out_dim)
self.fc1.weight.data.uniform_(-0.1, 0.1)
self.fc2.weight.data.uniform_(-0.1, 0.1)
def forward(self, x):
x = F.relu(self.fc1(x))
return self.fc2(x)
def action(self, state):
self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / 10000
if random.random() > self.epsilon:
            # Variable(..., volatile=True) was removed from autograd; use
            # torch.no_grad() for inference-only evaluation instead.
            with torch.no_grad():
                return self(state).max(1)[1].view(1, 1)
else:
return longTensor([[random.randrange(2)]])
def push(self, *args):
if len(self) < self.capacity:
self.pool.append(None)
self.pool[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self):
return random.sample(self.pool, self.bsz)
def __len__(self):
return len(self.pool)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'out_dim': 4, 'capacity': 4, 'bsz': 4,
'epsilon': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import random
import torch.nn as nn
from collections import namedtuple

# As in the eager snippet above, these definitions are assumptions so that
# DQNNew.action/push are runnable.
INITIAL_EPSILON = 1.0
FINAL_EPSILON = 0.01
longTensor = torch.LongTensor
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 32), (32, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1,
primals_2, buf3, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 32),
(32, 1), 0), reinterpret_tensor(primals_4, (32, 4), (1, 32), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), primals_4, buf3
class DQNNew(nn.Module):
def __init__(self, state_dim, out_dim, capacity, bsz, epsilon):
super().__init__()
self.steps_done = 0
self.position = 0
self.pool = []
self.capacity = capacity
self.bsz = bsz
self.epsilon = epsilon
self.fc1 = nn.Linear(state_dim, 32)
self.fc2 = nn.Linear(32, out_dim)
self.fc1.weight.data.uniform_(-0.1, 0.1)
self.fc2.weight.data.uniform_(-0.1, 0.1)
def action(self, state):
self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / 10000
if random.random() > self.epsilon:
            # Variable(..., volatile=True) was removed from autograd; use
            # torch.no_grad() for inference-only evaluation instead.
            with torch.no_grad():
                return self(state).max(1)[1].view(1, 1)
else:
return longTensor([[random.randrange(2)]])
def push(self, *args):
if len(self) < self.capacity:
self.pool.append(None)
self.pool[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self):
return random.sample(self.pool, self.bsz)
def __len__(self):
return len(self.pool)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| shinoyuki222/torch-light | DQN | false | 16,410 | [
"MIT"
] | 310 | 4799805d9bcae82a9f12a574dcf9fdd838c92ee9 | https://github.com/shinoyuki222/torch-light/tree/4799805d9bcae82a9f12a574dcf9fdd838c92ee9 |
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/rm/crmjcbrhesyjltwjwo2gy5ktnw7trv24ctlurkfme6ykhtfquq32.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rb/crbncgepp7pchewiviz2ecap4hkql77bxprjbw2ciuujmpu57s6c.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
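# The clone kernels in this section unpack the width-12 fused qkv projection
# into contiguous per-head buffers: the loads at offsets 0 and 4 above (and 8
# in a later kernel) select q, k and v. Eager sketch of the same unpacking:
#     qkv = qkv.reshape(B, N, 3, heads, head_dim).permute(2, 0, 3, 1, 4)
#     q, k, v = qkv[0], qkv[1], qkv[2]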
# kernel path: runs/run_shard_0/inductor_cache/pq/cpqnfrogm4dnzim2vyszfmugd6fc43gfnmxicoezmiidejzudrdz.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
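# The two kernels above implement a numerically stable softmax in two passes:
# exp(x - rowmax) first, then division by the row sum. Eager sketch:
def _softmax_reference(attn):
    shifted = attn - attn.amax(dim=-1, keepdim=True)
    e = shifted.exp()
    return e / e.sum(dim=-1, keepdim=True)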
# kernel path: runs/run_shard_0/inductor_cache/bb/cbby6op7dmkjsypxm4o3urasth73g6q5oi4ddo6uk6dsuv6off2v.py
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul_1 => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/we/cwe54p4p4jvwbdktkpj3wy2coheu6f3r3dgvi7ozm7xjfk4mgbwx.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_9,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/36/c36mdvdlmkr4g6rcquwj2pbp2ke2mrvmhx3r7akoexvrkzzfhdye.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_5), kwargs = {})
triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf1, 16, 4, grid=grid(16, 4), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf0, primals_3, buf2, 16, 4, grid=grid(16, 4), stream=stream0)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf0, primals_3, buf6, 16, 4, grid=grid(16, 4), stream=stream0)
del buf0
del primals_3
buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf10, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
return (buf10, buf5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Attention(nn.Module):
def __init__(self, dim, heads, dropout):
super().__init__()
self.heads = heads
head_dim = dim // heads
self.scale = head_dim ** -0.5
self.attn = None
self.qkv = nn.Linear(dim, dim * 3)
self.attn_drop = nn.Dropout(dropout)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(dropout)
@property
def unwrapped(self):
return self
def forward(self, x, mask=None):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.heads, C // self.heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'heads': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
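# The three clone kernels below split the fused qkv projection (a 16 x 12
# matmul output) back into separate q, k and v tensors, folding in the qkv
# bias on the way; the column offset into the 12-wide output (0 for q, 4 for
# k, 8 for v) selects which slice each kernel materializes in the
# head-separated layout consumed by the bmm calls in call().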
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
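# Numerically stable softmax, pass 1: apply the attention scale (head_dim **
# -0.5, which is 1.0 here since head_dim = dim // heads = 1 for these test
# shapes), subtract the per-row maximum over the last dimension of size 4,
# and exponentiate.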
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
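# Softmax pass 2: divide each exponentiated logit by the sum over its row.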
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
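# Same bias-add-and-slice pattern as clone_0/clone_1, here for the v slice
# (columns 8..12 of the fused qkv output).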
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
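# Transposes the (attn @ v) result from the head-separated layout back to
# contiguous (B, N, C), matching transpose(1, 2).reshape(B, N, C) in the
# original forward().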
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
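# Adds the output-projection bias in place, broadcast over the last dimension.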
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
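# call() mirrors Attention.forward(): an mm for the qkv projection, the clone
# kernels to split q/k/v, a bmm for q @ k^T, the two softmax kernels, a bmm
# for attn @ v, and a final mm plus bias add for the output projection. The
# dropout layers do not appear in this traced graph; the extra tensors in the
# return tuple are activations saved for the backward pass.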
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf1, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf0, primals_3, buf2, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf0, primals_3, buf6, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf0
del primals_3
buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_5[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0)
del buf9
triton_poi_fused_add_6[grid(64)](buf10, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
return buf10, buf5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0
), primals_4, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0)
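# AttentionNew keeps the original module's parameters and interface but routes
# forward() through the compiled call(); output[0] is the projected result and
# output[1] the attention map (buf5).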
class AttentionNew(nn.Module):
def __init__(self, dim, heads, dropout):
super().__init__()
self.heads = heads
head_dim = dim // heads
self.scale = head_dim ** -0.5
self.attn = None
self.qkv = nn.Linear(dim, dim * 3)
self.attn_drop = nn.Dropout(dropout)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(dropout)
@property
def unwrapped(self):
return self
def forward(self, input_0):
primals_2 = self.qkv.weight
primals_3 = self.qkv.bias
primals_4 = self.proj.weight
primals_5 = self.proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| shampooma/segmenter | Attention | false | 16,411 | [
"MIT"
] | 418 | b08fd481da6758e37d108ba28676229b62f757aa | https://github.com/shampooma/segmenter/tree/b08fd481da6758e37d108ba28676229b62f757aa |
Early_StyleConv_Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/6s/c6ssu7mmw5kesdtj3v7fv2fbx7eau2sv54jqhqb3ztgib5cqvqu2.py
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# weight => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 0.7071067811865476), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/d7/cd72qbtermy7xtw64je5kvlvqx52hkdpatbfqpqnz2opkmresvsx.py
# Topologically Sorted Source Nodes: [weight_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# weight_1 => mul_2
# Graph fragment:
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 0.7071067811865476), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rj/crjnce6eymavdflord56pzehm7rgev4dzvdgiuz65hgovoo7oj7e.py
# Topologically Sorted Source Nodes: [result, result_3, mul_3, result_4, result_5], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.mul, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# mul_3 => mul_4
# result => repeat
# result_3 => add_1, rsqrt, var_mean
# result_4 => add_2
# result_5 => gt, mul_5, where
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_1, [4, 1, 1, 1]), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %getitem), kwargs = {})
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %getitem_1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_2, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_2, %mul_5), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_leaky_relu_mul_repeat_2 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_leaky_relu_mul_repeat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_leaky_relu_mul_repeat_2', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_leaky_relu_mul_repeat_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x3 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r2 + (16*x0)), xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r2), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x3 % 4), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr3 + (x0 + (8*x1)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr3 + (4 + x0 + (8*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 16.0
tmp22 = tmp20 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp27 = tmp1 * tmp26
tmp28 = tmp0 + tmp27
tmp29 = tmp28 - tmp14
tmp30 = tmp29 * tmp25
tmp32 = tmp30 * tmp31
tmp34 = tmp32 + tmp33
tmp35 = 0.0
tmp36 = tmp34 > tmp35
tmp37 = 0.2
tmp38 = tmp34 * tmp37
tmp39 = tl.where(tmp36, tmp34, tmp38)
tl.store(out_ptr0 + (r2 + (16*x3)), tmp0, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp25, xmask)
tl.store(in_out_ptr1 + (r2 + (16*x3)), tmp39, xmask)
tl.store(out_ptr1 + (x3), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/cs/ccs4b4jy66au5erfv5ifk6fproa7b5z5lu6y3vjlatjl3veiqwwn.py
# Topologically Sorted Source Nodes: [weight_2], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# weight_2 => mul_6
# Graph fragment:
# %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_7, 0.23570226039551584), kwargs = {})
triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.23570226039551584
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wx/cwxqbjzszfpdmn2s6ttfpzgq54xal4jdh6c2ucbgsb7lfkfp72sl.py
# Topologically Sorted Source Nodes: [result_6, result_9, mul_8, result_10, result_11], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.mul, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# mul_8 => mul_11
# result_10 => add_5
# result_11 => gt_1, mul_12, where_1
# result_6 => convolution
# result_9 => add_4, rsqrt_1, var_mean_1
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %mul_6, %primals_8, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %getitem_4), kwargs = {})
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_11, %getitem_5), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_5, 0), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.2), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_5, %mul_12), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_convolution_leaky_relu_mul_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_leaky_relu_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_leaky_relu_mul_4', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_leaky_relu_mul_4(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (r2), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x3 % 4), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr3 + (x0 + (8*x1)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr3 + (4 + x0 + (8*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp13 / tmp15
tmp17 = tmp7 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.where(xmask, tmp19, 0)
tmp22 = tl.sum(tmp21, 1)[:, None]
tmp23 = 16.0
tmp24 = tmp22 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.rsqrt(tmp26)
tmp29 = tmp3 * tmp28
tmp30 = tmp2 + tmp29
tmp31 = tmp30 - tmp16
tmp32 = tmp31 * tmp27
tmp34 = tmp32 * tmp33
tmp36 = tmp34 + tmp35
tmp37 = 0.0
tmp38 = tmp36 > tmp37
tmp39 = 0.2
tmp40 = tmp36 * tmp39
tmp41 = tl.where(tmp38, tmp36, tmp40)
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp27, xmask)
tl.store(in_out_ptr2 + (r2 + (16*x3)), tmp41, xmask)
tl.store(out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (8, 4), (4, 1))
assert_size_stride(primals_5, (8, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (8, 4), (4, 1))
assert_size_stride(primals_11, (8, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_3, buf1, 4, grid=grid(4), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight_1], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(primals_4, buf2, 32, grid=grid(32), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(buf2, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf7 = reinterpret_tensor(buf5, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf5 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [result, result_3, mul_3, result_4, result_5], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.mul, aten.add, aten.leaky_relu]
triton_per_fused__native_batch_norm_legit_add_leaky_relu_mul_repeat_2.run(buf7, buf9, primals_1, primals_2, buf1, buf3, buf0, buf4, 16, 16, grid=grid(16), stream=stream0)
del primals_1
buf10 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight_2], Original ATen: [aten.mul]
triton_poi_fused_mul_3.run(primals_7, buf10, 144, grid=grid(144), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [result_6], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf9, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight_3], Original ATen: [aten.mul]
triton_poi_fused_mul_0.run(primals_9, buf13, 4, grid=grid(4), stream=stream0)
del primals_9
buf14 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight_4], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(primals_10, buf14, 32, grid=grid(32), stream=stream0)
del primals_10
buf15 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, primals_6, reinterpret_tensor(buf14, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf15)
del primals_11
buf12 = buf11; del buf11 # reuse
buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf17 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf19 = reinterpret_tensor(buf17, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf17 # reuse
buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [result_6, result_9, mul_8, result_10, result_11], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.mul, aten.add, aten.leaky_relu]
triton_per_fused__native_batch_norm_legit_add_convolution_leaky_relu_mul_4.run(buf12, buf19, buf21, primals_8, primals_2, buf13, buf15, buf16, 16, 16, grid=grid(16), stream=stream0)
del primals_8
return (buf21, buf1, buf2, buf10, buf13, buf14, primals_2, primals_6, buf0, buf1, reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf4, buf7, buf9, buf10, buf12, buf13, reinterpret_tensor(buf15, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf15, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf16, buf19, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
def quick_scale(module, name='weight'):
ScaleW.apply(module, name)
return module
class ScaleW:
"""
    Constructor: name - name of the attribute to be scaled
"""
def __init__(self, name):
self.name = name
def scale(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * math.sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
"""
Apply runtime scaling to specific module
"""
hook = ScaleW(name)
weight = getattr(module, name)
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
del module._parameters[name]
module.register_forward_pre_hook(hook)
def __call__(self, module, whatever):
weight = self.scale(module)
setattr(module, self.name, weight)
class SLinear(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
linear = nn.Linear(dim_in, dim_out)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = quick_scale(linear)
def forward(self, x):
return self.linear(x)
class SConv2d(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = quick_scale(conv)
def forward(self, x):
return self.conv(x)
class FC_A(nn.Module):
"""
    Learned affine transform A; this module is used to transform the
    intermediate latent vector w into a style vector
"""
def __init__(self, dim_latent, n_channel):
super().__init__()
self.transform = SLinear(dim_latent, n_channel * 2)
self.transform.linear.bias.data[:n_channel] = 1
self.transform.linear.bias.data[n_channel:] = 0
def forward(self, w):
style = self.transform(w).unsqueeze(2).unsqueeze(3)
return style
class AdaIn(nn.Module):
"""
adaptive instance normalization
"""
def __init__(self, n_channel):
super().__init__()
self.norm = nn.InstanceNorm2d(n_channel)
def forward(self, image, style):
factor, bias = style.chunk(2, 1)
result = self.norm(image)
result = result * factor + bias
return result
class Scale_B(nn.Module):
"""
Learned per-channel scale factor, used to scale the noise
"""
def __init__(self, n_channel):
super().__init__()
self.weight = nn.Parameter(torch.zeros((1, n_channel, 1, 1)))
def forward(self, noise):
result = noise * self.weight
return result
class Early_StyleConv_Block(nn.Module):
"""
    This is the very first block of the generator; it takes the learned constant as input
"""
def __init__(self, n_channel, dim_latent, dim_input):
super().__init__()
self.constant = nn.Parameter(torch.randn(1, n_channel, dim_input,
dim_input))
self.style1 = FC_A(dim_latent, n_channel)
self.style2 = FC_A(dim_latent, n_channel)
self.noise1 = quick_scale(Scale_B(n_channel))
self.noise2 = quick_scale(Scale_B(n_channel))
self.adain = AdaIn(n_channel)
self.lrelu = nn.LeakyReLU(0.2)
self.conv = SConv2d(n_channel, n_channel, 3, padding=1)
def forward(self, latent_w, noise):
result = self.constant.repeat(noise.shape[0], 1, 1, 1)
result = result + self.noise1(noise)
result = self.adain(result, self.style1(latent_w))
result = self.lrelu(result)
result = self.conv(result)
result = result + self.noise2(noise)
result = self.adain(result, self.style2(latent_w))
result = self.lrelu(result)
return result
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_channel': 4, 'dim_latent': 4, 'dim_input': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
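# The mul kernels below apply ScaleW's runtime weight scaling, weight *
# sqrt(2 / fan_in): 0.7071067811865476 == sqrt(2 / 4) for the noise scales
# and style linears (fan_in = 4), and 0.23570226039551584 == sqrt(2 / 36)
# for the 3x3 convolution (fan_in = 4 * 3 * 3).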
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
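# Fused kernel for the first half of forward(): repeat the learned constant
# over the batch, add the scaled noise, instance-normalize over the 16
# spatial positions, apply the per-channel style affine (factor, bias), and
# finish with LeakyReLU(0.2).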
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_leaky_relu_mul_repeat_2(
in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x3 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x3 % 4, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr3 + (x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp33 = tl.load(in_ptr3 + (4 + x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tl.where(xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 16.0
tmp22 = tmp20 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp27 = tmp1 * tmp26
tmp28 = tmp0 + tmp27
tmp29 = tmp28 - tmp14
tmp30 = tmp29 * tmp25
tmp32 = tmp30 * tmp31
tmp34 = tmp32 + tmp33
tmp35 = 0.0
tmp36 = tmp34 > tmp35
tmp37 = 0.2
tmp38 = tmp34 * tmp37
tmp39 = tl.where(tmp36, tmp34, tmp38)
tl.store(out_ptr0 + (r2 + 16 * x3), tmp0, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp25, xmask)
tl.store(in_out_ptr1 + (r2 + 16 * x3), tmp39, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.23570226039551584
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
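# Same fusion as above for the second half of the block, but starting from
# the convolution output (its bias is added in place) instead of the
# repeated constant.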
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_leaky_relu_mul_4(
in_out_ptr0, in_out_ptr1, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x3 % 4, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr3 + (x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr3 + (4 + x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tl.where(xmask, tmp7, 0)
tmp10 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp13 / tmp15
tmp17 = tmp7 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.where(xmask, tmp19, 0)
tmp22 = tl.sum(tmp21, 1)[:, None]
tmp23 = 16.0
tmp24 = tmp22 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.rsqrt(tmp26)
tmp29 = tmp3 * tmp28
tmp30 = tmp2 + tmp29
tmp31 = tmp30 - tmp16
tmp32 = tmp31 * tmp27
tmp34 = tmp32 * tmp33
tmp36 = tmp34 + tmp35
tmp37 = 0.0
tmp38 = tmp36 > tmp37
tmp39 = 0.2
tmp40 = tmp36 * tmp39
tmp41 = tl.where(tmp38, tmp36, tmp40)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp27, xmask)
tl.store(in_out_ptr2 + (r2 + 16 * x3), tmp41, xmask)
tl.store(out_ptr0 + x3, tmp16, xmask)
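# call() stitches the block together: scale the weights with the mul kernels,
# compute both style projections with addmm, run the first fused
# norm/style/activation kernel, dispatch the 3x3 convolution to an external
# kernel, and run the second fused kernel on its output.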
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (8, 4), (4, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (8, 4), (4, 1))
assert_size_stride(primals_11, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(4)](primals_3, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
triton_poi_fused_mul_1[grid(32)](primals_4, buf2, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(buf2,
(4, 8), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf7 = reinterpret_tensor(buf5, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf5
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = buf8
del buf8
triton_per_fused__native_batch_norm_legit_add_leaky_relu_mul_repeat_2[
grid(16)](buf7, buf9, primals_1, primals_2, buf1, buf3, buf0,
buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_1
buf10 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused_mul_3[grid(144)](primals_7, buf10, 144, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf11 = extern_kernels.convolution(buf9, buf10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_mul_0[grid(4)](primals_9, buf13, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
triton_poi_fused_mul_1[grid(32)](primals_10, buf14, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_10
buf15 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_11, primals_6, reinterpret_tensor(
buf14, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf15)
del primals_11
buf12 = buf11
del buf11
buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf17 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf19 = reinterpret_tensor(buf17, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf17
buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf21 = buf20
del buf20
triton_per_fused__native_batch_norm_legit_add_convolution_leaky_relu_mul_4[
grid(16)](buf12, buf19, buf21, primals_8, primals_2, buf13,
buf15, buf16, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_8
return (buf21, buf1, buf2, buf10, buf13, buf14, primals_2, primals_6,
buf0, buf1, reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 0),
reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf4, buf7,
buf9, buf10, buf12, buf13, reinterpret_tensor(buf15, (4, 4, 1, 1),
(8, 1, 1, 1), 0), reinterpret_tensor(buf15, (4, 4, 1, 1), (8, 1, 1,
1), 4), buf16, buf19)
def quick_scale(module, name='weight'):
ScaleW.apply(module, name)
return module
class ScaleW:
"""
    Constructor: name - name of the attribute to be scaled
"""
def __init__(self, name):
self.name = name
def scale(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * math.sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
"""
        Apply runtime scaling to a specific module
"""
hook = ScaleW(name)
weight = getattr(module, name)
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
del module._parameters[name]
module.register_forward_pre_hook(hook)
def __call__(self, module, whatever):
weight = self.scale(module)
setattr(module, self.name, weight)
class SLinear(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
linear = nn.Linear(dim_in, dim_out)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = quick_scale(linear)
def forward(self, x):
return self.linear(x)
class SConv2d(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = quick_scale(conv)
def forward(self, x):
return self.conv(x)
class FC_A(nn.Module):
"""
    Learned affine transform A; this module is used to transform the
    intermediate latent vector w into a style vector
"""
def __init__(self, dim_latent, n_channel):
super().__init__()
self.transform = SLinear(dim_latent, n_channel * 2)
self.transform.linear.bias.data[:n_channel] = 1
self.transform.linear.bias.data[n_channel:] = 0
def forward(self, w):
style = self.transform(w).unsqueeze(2).unsqueeze(3)
return style
class AdaIn(nn.Module):
"""
adaptive instance normalization
"""
def __init__(self, n_channel):
super().__init__()
self.norm = nn.InstanceNorm2d(n_channel)
def forward(self, image, style):
factor, bias = style.chunk(2, 1)
result = self.norm(image)
result = result * factor + bias
return result
class Scale_B(nn.Module):
"""
Learned per-channel scale factor, used to scale the noise
"""
def __init__(self, n_channel):
super().__init__()
self.weight = nn.Parameter(torch.zeros((1, n_channel, 1, 1)))
def forward(self, noise):
result = noise * self.weight
return result
class Early_StyleConv_BlockNew(nn.Module):
"""
    This is the very first block of the generator; it takes the learned constant as input
"""
def __init__(self, n_channel, dim_latent, dim_input):
super().__init__()
self.constant = nn.Parameter(torch.randn(1, n_channel, dim_input,
dim_input))
self.style1 = FC_A(dim_latent, n_channel)
self.style2 = FC_A(dim_latent, n_channel)
self.noise1 = quick_scale(Scale_B(n_channel))
self.noise2 = quick_scale(Scale_B(n_channel))
self.adain = AdaIn(n_channel)
self.lrelu = nn.LeakyReLU(0.2)
self.conv = SConv2d(n_channel, n_channel, 3, padding=1)
def forward(self, input_0, input_1):
primals_1 = self.constant
primals_5 = self.style1.transform.linear.bias
primals_4 = self.style1.transform.linear.weight_orig
primals_11 = self.style2.transform.linear.bias
primals_10 = self.style2.transform.linear.weight_orig
primals_3 = self.noise1.weight_orig
primals_9 = self.noise2.weight_orig
primals_8 = self.conv.conv.bias
primals_7 = self.conv.conv.weight_orig
primals_2 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| sergkuzn148/stg | Early_StyleConv_Block | false | 16,412 | [
"MIT"
] | 96 | 84d9f53ae3665c423836a4d0176dc3b22de62b19 | https://github.com/sergkuzn148/stg/tree/84d9f53ae3665c423836a4d0176dc3b22de62b19 |
ConvBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/of/cofwklo5nuhcnwyox2iqrwvifqkquaz5nm5gngbpzzb6xu4usmqd.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.23570226039551584), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.23570226039551584
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/c5/cc5memys4mk36uelimkmwaoyfedm2jyi3w33cemddvv5wt2gg6tn.py
# Topologically Sorted Source Nodes: [add, x, mul_1], Original ATen: [aten.add, aten.leaky_relu, aten.mul]
# Source node to ATen node mapping:
# add => add
# mul_1 => mul_2
# x => gt, mul_1, where
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %view), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.2), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul_1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 0.23570226039551584), kwargs = {})
triton_poi_fused_add_leaky_relu_mul_1 = async_compile.triton('triton_poi_fused_add_leaky_relu_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = 0.23570226039551584
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp9, xmask)
''', device_str='cuda')
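# Exposition (not emitted by Inductor): the trailing multiply by
# 0.23570226039551584 in this kernel is not part of LeakyReLU; it is the second
# WSConv2d's input scale (x * self.scale) hoisted into the first activation, so
# the next convolution can consume its output buffer directly without an extra
# elementwise pass.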
# kernel path: runs/run_shard_0/inductor_cache/qc/cqc2xwirgawnoilr4he7zwv5ev5jlr7h6gndq46ktmuwecxnmzjx.py
# Topologically Sorted Source Nodes: [add_1, x_1], Original ATen: [aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# add_1 => add_1
# x_1 => gt_1, mul_3, where_1
# Graph fragment:
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %view_1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_1, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.2), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_1, %mul_3), kwargs = {})
triton_poi_fused_add_leaky_relu_2 = async_compile.triton('triton_poi_fused_add_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, x, mul_1], Original ATen: [aten.add, aten.leaky_relu, aten.mul]
triton_poi_fused_add_leaky_relu_mul_1.run(buf1, primals_3, buf2, buf3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf6 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [add_1, x_1], Original ATen: [aten.add, aten.leaky_relu]
triton_poi_fused_add_leaky_relu_2.run(buf4, primals_5, buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf4
del primals_5
return (buf6, primals_2, primals_4, buf0, buf2, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
import torch.nn
class WSConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=1, gain=2):
super(WSConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding)
self.scale = (gain / (in_channels * kernel_size ** 2)) ** 0.5
self.bias = self.conv.bias
self.conv.bias = None
nn.init.normal_(self.conv.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
        return self.conv(x * self.scale) + self.bias.view(1, self.bias.shape[0], 1, 1)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = WSConv2d(in_channels, out_channels)
self.conv2 = WSConv2d(out_channels, out_channels)
self.leaky = nn.LeakyReLU(0.2)
def forward(self, x):
x = self.leaky(self.conv1(x))
x = self.leaky(self.conv2(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.23570226039551584
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = 0.23570226039551584
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
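# Exposition (not part of the generated module): each fused activation kernel
# above writes two outputs, the activated value and a boolean mask (x > 0) that
# autograd reuses in the backward pass instead of recomputing the comparison.
# A rough eager-mode equivalent of triton_poi_fused_add_leaky_relu_2, assuming
# 4D buffers shaped as in call() below:
#
#     def add_leaky_relu_reference(conv_out, bias):
#         pre = conv_out + bias.view(1, -1, 1, 1)
#         mask = pre > 0                      # saved for backward (buf5)
#         return mask, torch.where(mask, pre, 0.2 * pre)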
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_leaky_relu_mul_1[grid(256)](buf1, primals_3,
buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf6 = buf1
del buf1
triton_poi_fused_add_leaky_relu_2[grid(256)](buf4, primals_5, buf5,
buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf4
del primals_5
return buf6, primals_2, primals_4, buf0, buf2, buf3, buf5
class WSConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=1, gain=2):
super(WSConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding)
self.scale = (gain / (in_channels * kernel_size ** 2)) ** 0.5
self.bias = self.conv.bias
self.conv.bias = None
nn.init.normal_(self.conv.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
        return self.conv(x * self.scale) + self.bias.view(1, self.bias.shape[0], 1, 1)
class ConvBlockNew(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlockNew, self).__init__()
self.conv1 = WSConv2d(in_channels, out_channels)
self.conv2 = WSConv2d(out_channels, out_channels)
self.leaky = nn.LeakyReLU(0.2)
def forward(self, input_0):
primals_3 = self.conv1.bias
primals_2 = self.conv1.conv.weight
primals_5 = self.conv2.bias
primals_4 = self.conv2.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
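# Exposition (not part of the generated module): ConvBlockNew is a drop-in
# replacement for ConvBlock that routes forward() through the compiled call()
# above. Both WSConv2d scale multiplies, the bias adds and the LeakyReLUs run
# in the three fused Triton kernels, while the convolutions themselves go
# through ATen via extern_kernels.convolution. A usage sketch, assuming a CUDA
# device is available:
#
#     block = ConvBlockNew(in_channels=4, out_channels=4).cuda()
#     y = block(torch.rand(4, 4, 4, 4, device='cuda'))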
| shimon-c/Machine-Learning-Collection | ConvBlock | false | 16,413 | ["MIT"] | 3,094 | ac5dcd03a40a08a8af7e1a67ade37f28cf88db43 | https://github.com/shimon-c/Machine-Learning-Collection/tree/ac5dcd03a40a08a8af7e1a67ade37f28cf88db43 |
PaddedInstanceNorm1d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/oo/coocmvex5nlnnqvl7zaddk7e3nrsodkasy7xdvohmswagcepm2dg.py
# Topologically Sorted Source Nodes: [sum_, mean, pow_1, sqsum, sqmean, pow_2, var, add, pow_3], Original ATen: [aten.sum, aten.div, aten.pow, aten.sub, aten.add]
# Source node to ATen node mapping:
# add => add
# mean => div
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# sqmean => div_1
# sqsum => sum_2
# sum_ => sum_1
# var => sub
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [2], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %view), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2.0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %view), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%div, 2.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_1, %pow_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-05), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.5), kwargs = {})
triton_poi_fused_add_div_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_add_div_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x3 = (xindex // 4)
x1 = (xindex // 4) % 16384
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x3)), None)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x3)), None)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x3)), None)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x3)), None)
tmp7 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 / tmp7
tmp9 = tmp0 * tmp0
tmp10 = tmp1 * tmp1
tmp11 = tmp9 + tmp10
tmp12 = tmp3 * tmp3
tmp13 = tmp11 + tmp12
tmp14 = tmp5 * tmp5
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 * tmp8
tmp18 = tmp16 - tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp21, None)
''', device_str='cuda')
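# Exposition (not emitted by Inductor): the length-4 reduction over dim 2 is
# fully unrolled in the kernel above. With input strides (262144, 16, 4, 1),
# the four elements x[n, c, 0:4, w] sit at offsets x0 + 16*x3 + {0, 4, 8, 12},
# which are exactly the four loads; the kernel then forms mean = sum / lengths
# and std = sqrt(sqsum / lengths - mean**2 + eps) in a single pass.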
# kernel path: runs/run_shard_0/inductor_cache/el/celssy2sail7q42oq24ur6lmcghikc5sxwlugerb36l4kloss4y5.py
# Topologically Sorted Source Nodes: [sub_1, pow_1, sqsum, sqmean, pow_2, var, add, pow_3, truediv_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div, aten.add]
# Source node to ATen node mapping:
# add => add
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# sqmean => div_1
# sqsum => sum_2
# sub_1 => sub_1
# truediv_2 => div_2
# var => sub
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %div), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2.0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %view), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%div, 2.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_1, %pow_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-05), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.5), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %pow_3), kwargs = {})
triton_poi_fused_add_div_pow_sub_sum_1 = async_compile.triton('triton_poi_fused_add_div_pow_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_pow_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_pow_sub_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (4*x2)), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(arg1_1, (4, 16384, 4, 4), (262144, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16384, 1, 4), (65536, 4, 262144, 1), torch.float32)
buf1 = empty_strided_cuda((4, 16384, 1, 4), (65536, 4, 262144, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_, mean, pow_1, sqsum, sqmean, pow_2, var, add, pow_3], Original ATen: [aten.sum, aten.div, aten.pow, aten.sub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_pow_sub_sum_0.run(arg1_1, arg0_1, buf0, buf1, 262144, grid=grid(262144), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 16384, 4, 4), (262144, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub_1, pow_1, sqsum, sqmean, pow_2, var, add, pow_3, truediv_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div, aten.add]
triton_poi_fused_add_div_pow_sub_sum_1.run(arg1_1, buf0, buf1, buf2, 1048576, grid=grid(1048576), stream=stream0)
del arg1_1
del buf0
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 16384, 4, 4), (262144, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PaddedInstanceNorm1d(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=False,
track_running_stats=False):
super().__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
if affine is True:
raise NotImplementedError
if track_running_stats is True:
raise NotImplementedError
def forward(self, x, lengths):
lengths = lengths.view(-1, 1, 1).float()
sum_ = torch.sum(x, dim=2, keepdim=True)
mean = sum_ / lengths
sqsum = torch.sum(torch.pow(x, 2.0), dim=2, keepdim=True)
sqmean = sqsum / lengths
var = sqmean - torch.pow(mean, 2.0)
return (x - mean) / torch.pow(var + self.eps, 0.5)
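
# Exposition (not in the original source): the forward above computes variance
# in one pass via the identity Var[x] = E[x**2] - (E[x])**2, which is also what
# the fused Triton kernels evaluate. A small sanity sketch with toy values:
#
#     >>> import torch
#     >>> x = torch.tensor([1.0, 2.0, 4.0])
#     >>> (x * x).mean() - x.mean() ** 2
#     tensor(1.5556)
#     >>> x.var(unbiased=False)
#     tensor(1.5556)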
def get_inputs():
return [torch.rand([4, 16384, 4, 4]), torch.rand([4, 256, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x3 = xindex // 4
x1 = xindex // 4 % 16384
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x3), None)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x3), None)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x3), None)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x3), None)
tmp7 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 / tmp7
tmp9 = tmp0 * tmp0
tmp10 = tmp1 * tmp1
tmp11 = tmp9 + tmp10
tmp12 = tmp3 * tmp3
tmp13 = tmp11 + tmp12
tmp14 = tmp5 * tmp5
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 * tmp8
tmp18 = tmp16 - tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp21, None)
@triton.jit
def triton_poi_fused_add_div_pow_sub_sum_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 4 * x2), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + x3, tmp4, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(arg1_1, (4, 16384, 4, 4), (262144, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16384, 1, 4), (65536, 4, 262144, 1),
torch.float32)
buf1 = empty_strided_cuda((4, 16384, 1, 4), (65536, 4, 262144, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_pow_sub_sum_0[grid(262144)](arg1_1, arg0_1,
buf0, buf1, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 16384, 4, 4), (262144, 16, 4, 1),
torch.float32)
triton_poi_fused_add_div_pow_sub_sum_1[grid(1048576)](arg1_1, buf0,
buf1, buf2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del arg1_1
del buf0
del buf1
return buf2,
class PaddedInstanceNorm1dNew(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=False,
track_running_stats=False):
super().__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
if affine is True:
raise NotImplementedError
if track_running_stats is True:
raise NotImplementedError
def forward(self, input_0, input_1):
arg1_1 = input_0
arg0_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| shaun95/cotatron | PaddedInstanceNorm1d | false | 16,414 | ["BSD-3-Clause"] | 202 | 2d0254399a3063ba1d2f77bef535cc148041236e | https://github.com/shaun95/cotatron/tree/2d0254399a3063ba1d2f77bef535cc148041236e |
AtteMatchLay | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/dk/cdkstdndbosepqags2f4ezzhwapo4bsz42gpduzicr4a4elnju4y.py
# Topologically Sorted Source Nodes: [repres_1, max_att_1, temp], Original ATen: [aten.mul, aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.sum]
# Source node to ATen node mapping:
# max_att_1 => mul_1
# repres_1 => mul
# temp => clamp_min, clamp_min_1, div, div_1, mul_2, pow_1, pow_2, pow_3, pow_4, sum_1, sum_2, sum_3
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_2, %unsqueeze_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [2], True), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-08), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_4, 1e-08), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %clamp_min_1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %clamp_min), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [2]), kwargs = {})
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_sum_0 = async_compile.triton('triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (4*x1), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr2 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 * tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 * tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 * tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = 1e-08
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp23
tmp26 = tmp25 * tmp5
tmp27 = tmp26 * tmp26
tmp28 = tmp24 + tmp27
tmp30 = tmp29 * tmp10
tmp31 = tmp30 * tmp30
tmp32 = tmp28 + tmp31
tmp34 = tmp33 * tmp15
tmp35 = tmp34 * tmp34
tmp36 = tmp32 + tmp35
tmp37 = libdevice.sqrt(tmp36)
tmp38 = triton_helpers.maximum(tmp37, tmp20)
tmp39 = tmp23 / tmp38
tmp40 = tmp2 / tmp21
tmp41 = tmp39 * tmp40
tmp42 = tmp26 / tmp38
tmp43 = tmp6 / tmp21
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp30 / tmp38
tmp47 = tmp11 / tmp21
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp34 / tmp38
tmp51 = tmp16 / tmp21
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tl.store(in_out_ptr0 + (x2), tmp53, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4, 1), (4, 1, 256), torch.float32)
buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [repres_1, max_att_1, temp], Original ATen: [aten.mul, aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_sum_0.run(buf2, primals_2, primals_3, primals_1, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), primals_1, primals_2, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.functional import cosine_similarity
def multi_perspective_expand_for_2D(in_tensor, decompose_params):
"""
    Return: [batch_size, decompose_dim, dim]
"""
in_tensor = in_tensor.unsqueeze(1)
decompose_params = decompose_params.unsqueeze(0)
return torch.mul(in_tensor, decompose_params)
class AtteMatchLay(nn.Module):
def __init__(self, mp_dim, cont_dim):
super(AtteMatchLay, self).__init__()
self.cont_dim = cont_dim
self.mp_dim = mp_dim
self.register_parameter('weight', nn.Parameter(torch.Tensor(mp_dim,
cont_dim)))
self.weight.data.uniform_(-1.0, 1.0)
def forward(self, repres, max_att):
"""
Args:
repres - [bsz, a_len|q_len, cont_dim]
max_att - [bsz, q_len|a_len, cont_dim]
Return:
size - [bsz, sentence_len, mp_dim]
"""
bsz = repres.size(0)
sent_len = repres.size(1)
repres = repres.view(-1, self.cont_dim)
max_att = max_att.view(-1, self.cont_dim)
repres = multi_perspective_expand_for_2D(repres, self.weight)
max_att = multi_perspective_expand_for_2D(max_att, self.weight)
temp = cosine_similarity(repres, max_att, repres.dim() - 1)
return temp.view(bsz, sent_len, self.mp_dim)
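
# Exposition (not in the original source): each of the mp_dim rows of
# self.weight rescales both cont_dim vectors elementwise before the cosine, so
# entry [b, t, k] is cos(w_k * repres[b, t], w_k * max_att[b, t]). A rough
# unfused equivalent using the cosine_similarity imported above, assuming the
# shapes documented in forward():
#
#     def atte_match_reference(repres, max_att, weight):
#         r = repres.unsqueeze(2) * weight      # [bsz, len, mp_dim, cont_dim]
#         m = max_att.unsqueeze(2) * weight
#         return cosine_similarity(r, m, dim=-1)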
def get_inputs():
return [torch.rand([16, 4, 4]), torch.rand([64, 4])]
def get_init_inputs():
return [[], {'mp_dim': 4, 'cont_dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 * tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 * tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 * tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = 1e-08
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp23
tmp26 = tmp25 * tmp5
tmp27 = tmp26 * tmp26
tmp28 = tmp24 + tmp27
tmp30 = tmp29 * tmp10
tmp31 = tmp30 * tmp30
tmp32 = tmp28 + tmp31
tmp34 = tmp33 * tmp15
tmp35 = tmp34 * tmp34
tmp36 = tmp32 + tmp35
tmp37 = libdevice.sqrt(tmp36)
tmp38 = triton_helpers.maximum(tmp37, tmp20)
tmp39 = tmp23 / tmp38
tmp40 = tmp2 / tmp21
tmp41 = tmp39 * tmp40
tmp42 = tmp26 / tmp38
tmp43 = tmp6 / tmp21
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp30 / tmp38
tmp47 = tmp11 / tmp21
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp34 / tmp38
tmp51 = tmp16 / tmp21
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tl.store(in_out_ptr0 + x2, tmp53, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4, 1), (4, 1, 256), torch.float32)
buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_sum_0[grid(256)](
buf2, primals_2, primals_3, primals_1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
    return reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), primals_1, primals_2, primals_3
def multi_perspective_expand_for_2D(in_tensor, decompose_params):
"""
    Return: [batch_size, decompose_dim, dim]
"""
in_tensor = in_tensor.unsqueeze(1)
decompose_params = decompose_params.unsqueeze(0)
return torch.mul(in_tensor, decompose_params)
class AtteMatchLayNew(nn.Module):
def __init__(self, mp_dim, cont_dim):
super(AtteMatchLayNew, self).__init__()
self.cont_dim = cont_dim
self.mp_dim = mp_dim
self.register_parameter('weight', nn.Parameter(torch.Tensor(mp_dim,
cont_dim)))
self.weight.data.uniform_(-1.0, 1.0)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| shinoyuki222/torch-light | AtteMatchLay | false | 16,415 | ["MIT"] | 310 | 4799805d9bcae82a9f12a574dcf9fdd838c92ee9 | https://github.com/shinoyuki222/torch-light/tree/4799805d9bcae82a9f12a574dcf9fdd838c92ee9 |
InnerProductDecoder | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/mb/cmb72vxh36b4k6lvmt4562lj3nrqtpyzst2qbon2yqx22gdjfa7x.py
# Topologically Sorted Source Nodes: [adj], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# adj => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mm,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.native_dropout]
buf0 = torch.ops.aten.native_dropout.default(arg0_1, 0.1, True)
del arg0_1
buf1 = buf0[0]
del buf0
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf3)
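        # Exposition (not emitted by Inductor): reinterpret_tensor(buf1,
        # (4, 4), (1, 4), 0) is a zero-copy transpose; swapping the strides
        # from (4, 1) to (1, 4) yields z.t() as a view, so this mm computes
        # z @ z.T without materializing the transpose.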
del buf1
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [adj], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf4, 16, grid=grid(16), stream=stream0)
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class InnerProductDecoder(nn.Module):
def __init__(self, activation=torch.sigmoid, dropout=0.1):
super(InnerProductDecoder, self).__init__()
self.dropout = dropout
self.activation = activation
def forward(self, z):
z = F.dropout(z, self.dropout)
adj = self.activation(torch.mm(z, z.t()))
return adj
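
# Exposition (not in the original source): the decoder scores every node pair
# with an inner product, adj[i, j] = sigmoid(<z_i, z_j>), so the output is a
# symmetric soft adjacency matrix. A toy illustration:
#
#     >>> z = torch.eye(3)
#     >>> torch.sigmoid(z @ z.t())   # 0.7311 on the diagonal, 0.5 elsewhere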
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.native_dropout.default(arg0_1, 0.1, True)
del arg0_1
buf1 = buf0[0]
del buf0
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf3)
del buf1
buf4 = buf3
del buf3
get_raw_stream(0)
    triton_poi_fused_sigmoid_0[grid(16)](buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
return buf4,
class InnerProductDecoderNew(nn.Module):
def __init__(self, activation=torch.sigmoid, dropout=0.1):
super(InnerProductDecoderNew, self).__init__()
self.dropout = dropout
self.activation = activation
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| shionhonda/graph_ae | InnerProductDecoder | false | 16,416 | ["MIT"] | 48 | b8284a85286eee1b16cb90c0dd139d8927e83648 | https://github.com/shionhonda/graph_ae/tree/b8284a85286eee1b16cb90c0dd139d8927e83648 |
HyperpriorSynthesisDLMM | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/zu/czuua4sopi7uw2j4vhwnz6siwft4q3oub6yvsst7upuvdxgbdeip.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 102400
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = (yindex // 320)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (320*x2) + (8000*y1)), tmp0, xmask)
''', device_str='cuda')
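# Exposition (not emitted by Inductor): triton_poi_fused_0 above and the two
# repack kernels that follow have empty source-node annotations because they
# are layout conversions Inductor inserted on its own rather than ops from the
# model graph. Each reads a contiguous conv weight and rewrites it with the
# trailing channel index innermost, e.g. here the 5x5 taps (x2, input stride
# 25) land at output offset y0 + 320*x2 + 8000*y1, a channels-last copy that
# the extern convolution can consume directly.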
# kernel path: runs/run_shard_0/inductor_cache/rs/crschneg3kvye3kuwch7myvnxivcwiau22prtweju442z2v6tr7s.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1280
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = (yindex // 320)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (320*x2) + (5120*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yp/cypk6shm66qwudlo2djtgfzqrzq2fyztupq3rysg7n557sbl4x4c.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 20480
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gl/cglkcl5ykpgcv2uydrylvxb6dmvyihdwt3n735pkhiifwgzlyjgn.py
# Topologically Sorted Source Nodes: [conv_transpose2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [2, 2], [1, 1], True, [1, 1], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 81920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qc/cqcifbqljbi3jwpix3usk7lyjznx6mau7gwn475jaql4t2vmad47.py
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [2, 2], [1, 1], True, [1, 1], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 327680
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rr/crrnepphlcxumbxug76dqad5k6yukqznmhy2ehyblo25n7vv7jul.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pq/cpqkalbpw25e3evlemt6aztt5xhvq6zb3s6poshlca36wevc5gy7.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_3 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 3072
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = (yindex // 768)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (768*x2) + (196608*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (256*y3)), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (320, 320, 5, 5), (8000, 25, 5, 1))
assert_size_stride(primals_2, (320, ), (1, ))
assert_size_stride(primals_3, (4, 320, 4, 4), (5120, 16, 4, 1))
assert_size_stride(primals_4, (320, 320, 5, 5), (8000, 25, 5, 1))
assert_size_stride(primals_5, (320, ), (1, ))
assert_size_stride(primals_6, (320, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (768, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_9, (768, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((320, 320, 5, 5), (8000, 1, 1600, 320), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 102400, 25, grid=grid(102400, 25), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 320, 4, 4), (5120, 1, 1280, 320), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 1280, 16, grid=grid(1280, 16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((320, 320, 5, 5), (8000, 1, 1600, 320), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(primals_4, buf2, 102400, 25, grid=grid(102400, 25), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((320, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_6, buf3, 20480, 9, grid=grid(20480, 9), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf4, (4, 320, 8, 8), (20480, 1, 2560, 320))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf5, primals_2, 81920, grid=grid(81920), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, buf2, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf6, (4, 320, 16, 16), (81920, 1, 5120, 320))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf7, primals_5, 327680, grid=grid(327680), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf9, primals_7, 65536, grid=grid(65536), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 768, 16, 16), (196608, 1, 12288, 768))
buf11 = empty_strided_cuda((4, 768, 16, 16), (196608, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf10, primals_9, buf11, 3072, 256, grid=grid(3072, 256), stream=stream0)
del buf10
del primals_9
return (buf11, buf0, buf1, buf2, buf3, primals_8, buf5, buf7, buf9, )
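    # Note (added commentary): buf11 is the module output; the remaining returned
    # tensors are most likely the activations/weights saved for the backward pass
    # of this '0_forward' AOT graph (an inference from the AOT ID, not stated here).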
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((320, 320, 5, 5), (8000, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((320, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 320, 4, 4), (5120, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((320, 320, 5, 5), (8000, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((320, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((320, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((768, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((768, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def get_num_DLMM_channels(C, K=4, params=['mu', 'scale', 'mix']):
"""
C: Channels of latent representation (L3C uses 5).
K: Number of mixture coefficients.
"""
return C * K * len(params)
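# Illustrative arithmetic (added note): with the defaults used below,
# get_num_DLMM_channels(64) = 64 * 4 * 3 = 768, which matches the 768-channel
# conv_out weight (768, 64, 1, 1) asserted in the compiled call() above.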
class HyperpriorSynthesisDLMM(nn.Module):
"""
Outputs distribution parameters of input latents, conditional on
hyperlatents, assuming a discrete logistic mixture model.
C: Number of output channels
"""
def __init__(self, C=64, N=320, activation='relu', final_activation=None):
super(HyperpriorSynthesisDLMM, self).__init__()
cnn_kwargs = dict(kernel_size=5, stride=2, padding=2, output_padding=1)
self.activation = getattr(F, activation)
self.final_activation = final_activation
self.conv1 = nn.ConvTranspose2d(N, N, **cnn_kwargs)
self.conv2 = nn.ConvTranspose2d(N, N, **cnn_kwargs)
self.conv3 = nn.ConvTranspose2d(N, C, kernel_size=3, stride=1,
padding=1)
self.conv_out = nn.Conv2d(C, get_num_DLMM_channels(C), kernel_size=
1, stride=1)
if self.final_activation is not None:
self.final_activation = getattr(F, final_activation)
def forward(self, x):
x = self.activation(self.conv1(x))
x = self.activation(self.conv2(x))
x = self.conv3(x)
x = self.conv_out(x)
if self.final_activation is not None:
x = self.final_activation(x)
return x
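# Shape flow for the compiled 4x320x4x4 input, read off the assert_size_stride
# checks in call(): (4, 320, 4, 4) -> conv1 -> (4, 320, 8, 8) -> conv2 ->
# (4, 320, 16, 16) -> conv3 -> (4, 64, 16, 16) -> conv_out -> (4, 768, 16, 16).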
def get_inputs():
return [torch.rand([4, 320, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = yindex // 320
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 320 * x2 + 8000 * y1), tmp0, xmask)
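# Added note: triton_poi_fused_0/_1/_2 contain no source-level ops; they repack
# the weights and the input from contiguous NCHW strides into the channels-last
# layouts allocated in call() (e.g. (8000, 25, 5, 1) -> (8000, 1, 1600, 320) for
# the 5x5 weights) so the extern convolutions run on channels-last data.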
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 1280
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = yindex // 320
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 320 * x2 + 5120 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_6(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = yindex // 768
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 768 * x2 + 196608 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 256 * y3), tmp2, xmask)
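# Added note: this final kernel adds the conv_out bias (in_ptr1) while copying
# the channels-last convolution result (strides (196608, 1, 12288, 768)) back
# into the contiguous NCHW output buffer (strides (196608, 256, 16, 1)).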
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (320, 320, 5, 5), (8000, 25, 5, 1))
assert_size_stride(primals_2, (320,), (1,))
assert_size_stride(primals_3, (4, 320, 4, 4), (5120, 16, 4, 1))
assert_size_stride(primals_4, (320, 320, 5, 5), (8000, 25, 5, 1))
assert_size_stride(primals_5, (320,), (1,))
assert_size_stride(primals_6, (320, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (768, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_9, (768,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((320, 320, 5, 5), (8000, 1, 1600, 320),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(102400, 25)](primals_1, buf0, 102400, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 320, 4, 4), (5120, 1, 1280, 320),
torch.float32)
triton_poi_fused_1[grid(1280, 16)](primals_3, buf1, 1280, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((320, 320, 5, 5), (8000, 1, 1600, 320),
torch.float32)
triton_poi_fused_0[grid(102400, 25)](primals_4, buf2, 102400, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((320, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_2[grid(20480, 9)](primals_6, buf3, 20480, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf4, (4, 320, 8, 8), (20480, 1, 2560, 320))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_3[grid(81920)](buf5, primals_2,
81920, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf6 = extern_kernels.convolution(buf5, buf2, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf6, (4, 320, 16, 16), (81920, 1, 5120, 320))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_4[grid(327680)](buf7, primals_5,
327680, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf9 = buf8
del buf8
triton_poi_fused_convolution_5[grid(65536)](buf9, primals_7, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 768, 16, 16), (196608, 1, 12288, 768))
buf11 = empty_strided_cuda((4, 768, 16, 16), (196608, 256, 16, 1),
torch.float32)
triton_poi_fused_convolution_6[grid(3072, 256)](buf10, primals_9,
buf11, 3072, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf10
del primals_9
return buf11, buf0, buf1, buf2, buf3, primals_8, buf5, buf7, buf9
def get_num_DLMM_channels(C, K=4, params=['mu', 'scale', 'mix']):
"""
C: Channels of latent representation (L3C uses 5).
K: Number of mixture coefficients.
"""
return C * K * len(params)
class HyperpriorSynthesisDLMMNew(nn.Module):
"""
Outputs distribution parameters of input latents, conditional on
hyperlatents, assuming a discrete logistic mixture model.
C: Number of output channels
"""
def __init__(self, C=64, N=320, activation='relu', final_activation=None):
super(HyperpriorSynthesisDLMMNew, self).__init__()
cnn_kwargs = dict(kernel_size=5, stride=2, padding=2, output_padding=1)
self.activation = getattr(F, activation)
self.final_activation = final_activation
self.conv1 = nn.ConvTranspose2d(N, N, **cnn_kwargs)
self.conv2 = nn.ConvTranspose2d(N, N, **cnn_kwargs)
self.conv3 = nn.ConvTranspose2d(N, C, kernel_size=3, stride=1,
padding=1)
self.conv_out = nn.Conv2d(C, get_num_DLMM_channels(C), kernel_size=
1, stride=1)
if self.final_activation is not None:
self.final_activation = getattr(F, final_activation)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv_out.weight
primals_9 = self.conv_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| sedrickkeh/high-fidelity-dual-image | HyperpriorSynthesisDLMM | false | 16,417 | ["Apache-2.0"] | 266 | 9cefd378467826b91596653df38666e469bb23e0 | https://github.com/sedrickkeh/high-fidelity-dual-image/tree/9cefd378467826b91596653df38666e469bb23e0 |
CrossEntropy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/7l/c7l3ub2upupvsasdpywg3gprcsoaujygvvtujpjy6husp5fukku6.py
# Topologically Sorted Source Nodes: [gt, mask, mul, sum_1, neg, sum_2, truediv], Original ATen: [aten.gt, aten._to_copy, aten.mul, aten.sum, aten.neg, aten.div]
# Source node to ATen node mapping:
# gt => gt
# mask => convert_element_type
# mul => mul
# neg => neg
# sum_1 => sum_1
# sum_2 => sum_2
# truediv => div
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg1_1, 0), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %convert_element_type), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%convert_element_type,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, %sum_2), kwargs = {})
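# In short, the whole expression -(tgt_props * mask).sum() / mask.sum() from
# CrossEntropy.forward is fused into the single persistent reduction below.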
triton_per_fused__to_copy_div_gt_mul_neg_sum_0 = async_compile.triton('triton_per_fused__to_copy_div_gt_mul_neg_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_div_gt_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_div_gt_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (tmp4 + (4*r0)), None, eviction_policy='evict_last')
tmp7 = tmp6.to(tl.float32)
tmp8 = tl.full([1, 1], 0, tl.int64)
tmp9 = tmp0 > tmp8
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp7 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.sum(tmp12, 1)[:, None]
tmp15 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp17 = tl.sum(tmp15, 1)[:, None]
tmp18 = -tmp14
tmp19 = tmp18 / tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [gt, mask, mul, sum_1, neg, sum_2, truediv], Original ATen: [aten.gt, aten._to_copy, aten.mul, aten.sum, aten.neg, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_div_gt_mul_neg_sum_0.run(buf2, arg1_1, arg0_1, 1, 16, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.int64)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CrossEntropy(nn.Module):
def __init__(self):
super().__init__()
def forward(self, props, tgt):
tgt_props = props.gather(2, tgt.unsqueeze(2)).squeeze()
mask = (tgt > 0).float()
return -(tgt_props * mask).sum() / mask.sum()
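# Minimal usage sketch (added example; the tensor values are illustrative
# assumptions -- `props` is taken to hold per-class log-probabilities and
# index 0 is treated as padding, as the mask implies):
if __name__ == "__main__":
    example_props = torch.log_softmax(torch.randn(4, 4, 4), dim=2)
    example_tgt = torch.randint(1, 4, (4, 4))  # non-padding targets in [1, 3]
    print(CrossEntropy()(example_props, example_tgt))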
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64)]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_div_gt_mul_neg_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = tmp6.to(tl.float32)
tmp8 = tl.full([1, 1], 0, tl.int64)
tmp9 = tmp0 > tmp8
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp7 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.sum(tmp12, 1)[:, None]
tmp15 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp17 = tl.sum(tmp15, 1)[:, None]
tmp18 = -tmp14
tmp19 = tmp18 / tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None)
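# Added commentary: tmp2/tmp4 reproduce gather's negative-index wrap-around
# (idx < 0 -> idx + 4) and the device_assert is its bounds check, so the
# in_ptr1[tmp4 + 4 * r0] load is props.gather(2, tgt.unsqueeze(2)) for these
# fixed 4x4(x4) shapes; the two tl.sum reductions then give the masked total
# and the number of non-padding positions.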
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_div_gt_mul_neg_sum_0[grid(1)](buf2,
arg1_1, arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class CrossEntropyNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| shinoyuki222/torch-light | CrossEntropy | false | 16,418 | ["MIT"] | 310 | 4799805d9bcae82a9f12a574dcf9fdd838c92ee9 | https://github.com/shinoyuki222/torch-light/tree/4799805d9bcae82a9f12a574dcf9fdd838c92ee9 |
HyperpriorSynthesis | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/zu/czuua4sopi7uw2j4vhwnz6siwft4q3oub6yvsst7upuvdxgbdeip.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
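# (Added note: this kernel maps to no source-level op; it repacks the 5x5
# weight from contiguous NCHW strides into the channels-last layout consumed
# by the extern transposed convolution in call(). triton_poi_fused_1/_2 below
# do the same for the input activation and the 3x3 weight.)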
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 102400
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = (yindex // 320)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (320*x2) + (8000*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rs/crschneg3kvye3kuwch7myvnxivcwiau22prtweju442z2v6tr7s.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1280
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = (yindex // 320)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (320*x2) + (5120*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f5/cf5rahjtn7v4v36kwvj65zjpf4ce46w2hyul4dndgpxx4vwwwjzr.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 70400
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 220
y1 = (yindex // 220)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (220*x2) + (1980*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gl/cglkcl5ykpgcv2uydrylvxb6dmvyihdwt3n735pkhiifwgzlyjgn.py
# Topologically Sorted Source Nodes: [conv_transpose2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [2, 2], [1, 1], True, [1, 1], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 81920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qc/cqcifbqljbi3jwpix3usk7lyjznx6mau7gwn475jaql4t2vmad47.py
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [2, 2], [1, 1], True, [1, 1], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 327680
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/o6/co6yb7vc327wwyteuvhwzyuiictgb2lllnxydf7efevmaftxhgnt.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 880
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 220
y1 = (yindex // 220)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (220*x2) + (56320*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (256*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (320, 320, 5, 5), (8000, 25, 5, 1))
assert_size_stride(primals_2, (320, ), (1, ))
assert_size_stride(primals_3, (4, 320, 4, 4), (5120, 16, 4, 1))
assert_size_stride(primals_4, (320, 320, 5, 5), (8000, 25, 5, 1))
assert_size_stride(primals_5, (320, ), (1, ))
assert_size_stride(primals_6, (320, 220, 3, 3), (1980, 9, 3, 1))
assert_size_stride(primals_7, (220, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((320, 320, 5, 5), (8000, 1, 1600, 320), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 102400, 25, grid=grid(102400, 25), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 320, 4, 4), (5120, 1, 1280, 320), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 1280, 16, grid=grid(1280, 16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((320, 320, 5, 5), (8000, 1, 1600, 320), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(primals_4, buf2, 102400, 25, grid=grid(102400, 25), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((320, 220, 3, 3), (1980, 1, 660, 220), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_6, buf3, 70400, 9, grid=grid(70400, 9), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf4, (4, 320, 8, 8), (20480, 1, 2560, 320))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf5, primals_2, 81920, grid=grid(81920), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, buf2, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf6, (4, 320, 16, 16), (81920, 1, 5120, 320))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf7, primals_5, 327680, grid=grid(327680), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 220, 16, 16), (56320, 1, 3520, 220))
buf9 = empty_strided_cuda((4, 220, 16, 16), (56320, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf8, primals_7, buf9, 880, 256, grid=grid(880, 256), stream=stream0)
del buf8
del primals_7
return (buf9, buf0, buf1, buf2, buf3, buf5, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((320, 320, 5, 5), (8000, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((320, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 320, 4, 4), (5120, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((320, 320, 5, 5), (8000, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((320, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((320, 220, 3, 3), (1980, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((220, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class HyperpriorSynthesis(nn.Module):
"""
Hyperprior 'synthesis model' as proposed in [1]. Outputs
distribution parameters of input latents.
    [1] Ballé et al., "Variational image compression with a scale hyperprior",
arXiv:1802.01436 (2018).
C: Number of output channels
"""
def __init__(self, C=220, N=320, activation='relu', final_activation=None):
super(HyperpriorSynthesis, self).__init__()
cnn_kwargs = dict(kernel_size=5, stride=2, padding=2, output_padding=1)
self.activation = getattr(F, activation)
self.final_activation = final_activation
self.conv1 = nn.ConvTranspose2d(N, N, **cnn_kwargs)
self.conv2 = nn.ConvTranspose2d(N, N, **cnn_kwargs)
self.conv3 = nn.ConvTranspose2d(N, C, kernel_size=3, stride=1,
padding=1)
if self.final_activation is not None:
self.final_activation = getattr(F, final_activation)
def forward(self, x):
x = self.activation(self.conv1(x))
x = self.activation(self.conv2(x))
x = self.conv3(x)
if self.final_activation is not None:
x = self.final_activation(x)
return x
def get_inputs():
return [torch.rand([4, 320, 4, 4])]
def get_init_inputs():
return [[], {}]
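# Usage sketch (added for readability; not from the original repo). The shapes
# follow the compiled graph below: two stride-2 transposed convolutions double
# the 4x4 latent to 16x16, and conv3 maps 320 -> 220 channels.
#
#   model = HyperpriorSynthesis()
#   y = model(get_inputs()[0])
#   assert y.shape == (4, 220, 16, 16)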
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = yindex // 320
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 320 * x2 + 8000 * y1), tmp0, xmask)
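# Added note: triton_poi_fused_0 repacks the contiguous (O, I, kH, kW) weight,
# strides (8000, 25, 5, 1), into strides (8000, 1, 1600, 320), making the
# input-channel dimension innermost (channels-last style) for the transposed
# convolutions below. A rough eager equivalent, assuming a CUDA device:
#
#   w = torch.randn(320, 320, 5, 5, device='cuda')
#   w_packed = torch.empty_strided((320, 320, 5, 5), (8000, 1, 1600, 320),
#                                  device='cuda')
#   w_packed.copy_(w)  # same values, new memory order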
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 1280
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = yindex // 320
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 320 * x2 + 5120 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 70400
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 220
y1 = yindex // 220
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 220 * x2 + 1980 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
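# Added note: this kernel (and the identical _4 below) fuses the deferred
# conv-transpose bias with ReLU in place: out = max(0, conv_out + bias[c]),
# where c = flat_index % 320 because the activations are stored
# channel-innermost. Eager sketch:
#
#   x = torch.relu(conv_out + bias.view(1, -1, 1, 1))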
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 880
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 220
y1 = yindex // 220
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 220 * x2 + 56320 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 256 * y3), tmp2, xmask & ymask)
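# Added note: this last kernel adds conv3's bias and, via the index swap from
# (y0 + 220 * x2 + 56320 * y1) to (x2 + 256 * y3), permutes the result from
# the internal channel-innermost layout back to a standard contiguous NCHW
# output. Eager sketch:
#
#   out = (conv_out + bias.view(1, -1, 1, 1)).contiguous()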
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (320, 320, 5, 5), (8000, 25, 5, 1))
assert_size_stride(primals_2, (320,), (1,))
assert_size_stride(primals_3, (4, 320, 4, 4), (5120, 16, 4, 1))
assert_size_stride(primals_4, (320, 320, 5, 5), (8000, 25, 5, 1))
assert_size_stride(primals_5, (320,), (1,))
assert_size_stride(primals_6, (320, 220, 3, 3), (1980, 9, 3, 1))
assert_size_stride(primals_7, (220,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((320, 320, 5, 5), (8000, 1, 1600, 320),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(102400, 25)](primals_1, buf0, 102400, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 320, 4, 4), (5120, 1, 1280, 320),
torch.float32)
triton_poi_fused_1[grid(1280, 16)](primals_3, buf1, 1280, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((320, 320, 5, 5), (8000, 1, 1600, 320),
torch.float32)
triton_poi_fused_0[grid(102400, 25)](primals_4, buf2, 102400, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((320, 220, 3, 3), (1980, 1, 660, 220),
torch.float32)
triton_poi_fused_2[grid(70400, 9)](primals_6, buf3, 70400, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf4, (4, 320, 8, 8), (20480, 1, 2560, 320))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_3[grid(81920)](buf5, primals_2,
81920, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf6 = extern_kernels.convolution(buf5, buf2, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf6, (4, 320, 16, 16), (81920, 1, 5120, 320))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_4[grid(327680)](buf7, primals_5,
327680, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 220, 16, 16), (56320, 1, 3520, 220))
buf9 = empty_strided_cuda((4, 220, 16, 16), (56320, 256, 16, 1),
torch.float32)
triton_poi_fused_convolution_5[grid(880, 256)](buf8, primals_7,
buf9, 880, 256, XBLOCK=256, YBLOCK=4, num_warps=4, num_stages=1)
del buf8
del primals_7
return buf9, buf0, buf1, buf2, buf3, buf5, buf7
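# Added note: call() follows Inductor's forward-graph convention of returning
# the network output (buf9) first, followed by tensors a backward pass would
# reuse (the repacked weights buf0/buf2/buf3, the repacked input buf1, and the
# post-ReLU activations buf5/buf7); the wrapper class below keeps only
# output[0].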
class HyperpriorSynthesisNew(nn.Module):
"""
Hyperprior 'synthesis model' as proposed in [1]. Outputs
distribution parameters of input latents.
    [1] Ballé et al., "Variational image compression with a scale hyperprior",
    arXiv:1802.01436 (2018).
C: Number of output channels
"""
def __init__(self, C=220, N=320, activation='relu', final_activation=None):
super(HyperpriorSynthesisNew, self).__init__()
cnn_kwargs = dict(kernel_size=5, stride=2, padding=2, output_padding=1)
self.activation = getattr(F, activation)
self.final_activation = final_activation
self.conv1 = nn.ConvTranspose2d(N, N, **cnn_kwargs)
self.conv2 = nn.ConvTranspose2d(N, N, **cnn_kwargs)
self.conv3 = nn.ConvTranspose2d(N, C, kernel_size=3, stride=1,
padding=1)
if self.final_activation is not None:
self.final_activation = getattr(F, final_activation)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
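# Parity-check sketch (added commentary, not part of the dump; assumes a CUDA
# device): the compiled wrapper should match the eager module above.
#
#   ref = HyperpriorSynthesis().cuda().eval()
#   new = HyperpriorSynthesisNew().cuda().eval()
#   new.load_state_dict(ref.state_dict())
#   x = torch.rand(4, 320, 4, 4, device='cuda')
#   torch.testing.assert_close(new(x), ref(x), rtol=1e-4, atol=1e-4)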
| sedrickkeh/high-fidelity-dual-image | HyperpriorSynthesis | false | 16,419 | [
"Apache-2.0"
] | 266 | 9cefd378467826b91596653df38666e469bb23e0 | https://github.com/sedrickkeh/high-fidelity-dual-image/tree/9cefd378467826b91596653df38666e469bb23e0 |
BasicUNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pq/cpqjngmwse22zuc3kj4qnpvtiudxh7xrch2gwzvtjb5saxhz2ubd.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
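# Added note: a plain in-place bias add for the first 8-channel conv; the
# activations here are standard contiguous NCHW, so channel =
# (flat_index // 4096) % 8 with 4096 = 64 * 64 spatial positions. Eager
# sketch:
#
#   x += bias.view(1, -1, 1, 1)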
# kernel path: runs/run_shard_0/inductor_cache/re/crevsbgpcncsm7gtijpfbb2urj47a2o76s4ukpid2yml4qg4v4ui.py
# Topologically Sorted Source Nodes: [conv2d_1, instance_norm, x, x_19], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.cat]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# instance_norm => add, rsqrt, var_mean
# x => gt, mul_1, where
# x_19 => cat_3
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.001), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_1), kwargs = {})
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%_unsafe_index_3, %view_3], 1), kwargs = {})
triton_red_fused__native_batch_norm_legit_cat_convolution_leaky_relu_1 = async_compile.triton('triton_red_fused__native_batch_norm_legit_cat_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[32, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_cat_convolution_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_cat_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 32
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 8
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce(
tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + (4096*x3)), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(
tmp4_mean, tmp4_m2, tmp4_weight, 1
)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tl.store(out_ptr0 + (x3), tmp4, xmask)
x1 = (xindex // 8)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp7 = tl.load(in_out_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp8 = tmp7 - tmp4
tmp9 = 4096.0
tmp10 = tmp5 / tmp9
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tmp13 = libdevice.rsqrt(tmp12)
tmp14 = tmp8 * tmp13
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = 0.001
tmp18 = tmp14 * tmp17
tmp19 = tl.where(tmp16, tmp14, tmp18)
tl.store(out_ptr2 + (r2 + (4096*x3)), tmp19, rmask & xmask)
tl.store(out_ptr3 + (r2 + (4096*x0) + (98304*x1)), tmp19, rmask & xmask)
tmp20 = 4096.0
tmp21 = tmp5 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tl.store(out_ptr4 + (x3), tmp24, xmask)
''', device_str='cuda')
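# Added note: the kernel above is a two-pass instance norm (a chunked Welford
# loop computes mean/var over each 64x64 plane, a second loop normalizes)
# fused with leaky ReLU and a concat write for the U-Net skip connection. The
# 0.001 factor is the leaky-ReLU negative slope. Eager sketch:
#
#   y = F.leaky_relu(F.instance_norm(conv_out), negative_slope=0.001)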
# kernel path: runs/run_shard_0/inductor_cache/sh/cshsrfbnj2mtr3hy4an3yzo5bfnis2uevulk23oo25xlkwuosirt.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
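# Added note: a 2x2, stride-2 max pool that also records which of the four
# window positions won (int8 codes 0..3), matching the values of
#
#   y, idx = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
#
# up to the index encoding. The _5, _8 and _11 variants below repeat the same
# pattern at smaller resolutions.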
# kernel path: runs/run_shard_0/inductor_cache/hu/chuad7jvtwztyl4nplnzkib7dg6ydis2y3cluo7oqurhs4ds6uc4.py
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/st/cstm5mby3hsk6nxv3526eogtkhr6z66w7e24do6b3yqjxz24phdj.py
# Topologically Sorted Source Nodes: [conv2d_3, instance_norm_1, x_2, x_16], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.cat]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# instance_norm_1 => add_1, rsqrt_1, var_mean_1
# x_16 => cat_2
# x_2 => gt_1, mul_3, where_1
# Graph fragment:
# %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_6, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_6, 0.001), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_6, %mul_3), kwargs = {})
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%_unsafe_index_2, %view_8], 1), kwargs = {})
triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_4(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
xnumel = 64
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_out_ptr0 + (r2 + (1024*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 1024, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = tmp2 - tmp10
tmp17 = 1024.0
tmp18 = tmp15 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp16 * tmp21
tmp23 = 0.0
tmp24 = tmp22 > tmp23
tmp25 = 0.001
tmp26 = tmp22 * tmp25
tmp27 = tl.where(tmp24, tmp22, tmp26)
tl.store(in_out_ptr0 + (r2 + (1024*x3)), tmp2, None)
tl.store(out_ptr2 + (r2 + (1024*x3)), tmp27, None)
tl.store(out_ptr3 + (r2 + (1024*x0) + (49152*x1)), tmp27, None)
tl.store(out_ptr4 + (x3), tmp21, None)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
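# Added note: same instance-norm + leaky-ReLU + concat pattern as
# triton_red_..._1 above, but as a persistent reduction: RBLOCK covers the
# whole 32x32 plane (1024 elements) in registers, so the statistics come from
# in-register tl.sum reductions instead of a chunked Welford loop.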
# kernel path: runs/run_shard_0/inductor_cache/ai/caibkfk3bxq2godfssvsctmvar3lgxf47lj4ty6ik7crctqnsgx4.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_3 => getitem_6, getitem_7
# Graph fragment:
# %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xb/cxbxouxgqz6pe3dnz4qi3mypv3zitzwlosr33sjcsozlwh4jvawe.py
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bf/cbfqa3dhe3dimlzknkdc7zb6af2jtnjwgpyvkqwfkf7bu57gu527.py
# Topologically Sorted Source Nodes: [conv2d_5, instance_norm_2, x_4, x_13], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.cat]
# Source node to ATen node mapping:
# conv2d_5 => convolution_5
# instance_norm_2 => add_2, rsqrt_2, var_mean_2
# x_13 => cat_1
# x_4 => gt_2, mul_5, where_2
# Graph fragment:
# %convolution_5 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_4, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_10, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_11, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 0.001), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %view_11, %mul_5), kwargs = {})
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%_unsafe_index_1, %view_13], 1), kwargs = {})
triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_7 = async_compile.triton('triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_7(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 32
x1 = (xindex // 32)
tmp0 = tl.load(in_out_ptr0 + (r2 + (256*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 256, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = tmp2 - tmp10
tmp17 = 256.0
tmp18 = tmp15 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp16 * tmp21
tmp23 = 0.0
tmp24 = tmp22 > tmp23
tmp25 = 0.001
tmp26 = tmp22 * tmp25
tmp27 = tl.where(tmp24, tmp22, tmp26)
tl.store(in_out_ptr0 + (r2 + (256*x3)), tmp2, None)
tl.store(out_ptr2 + (r2 + (256*x3)), tmp27, None)
tl.store(out_ptr3 + (r2 + (256*x0) + (24576*x1)), tmp27, None)
tl.store(out_ptr4 + (x3), tmp21, None)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/a6/ca6cng7bbgns4ws4hd4a7i6mlc7rafvbvztrwmqqg35zlvtb5u63.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_10, getitem_11
# Graph fragment:
# %getitem_10 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_11 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/il/cilpbahuw43f4gxxbrv3anps3rukxekc7fjqqs2cjnj47pqyt6jy.py
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_6 => convolution_6
# Graph fragment:
# %convolution_6 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_10, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_9 = async_compile.triton('triton_poi_fused_convolution_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gn/cgnazflyomi7j3anmll4pphpqq7eqwkadm5xgd6w5g3xunkxolu4.py
# Topologically Sorted Source Nodes: [conv2d_7, instance_norm_3, x_6, x_10], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.cat]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# instance_norm_3 => add_3, rsqrt_3, var_mean_3
# x_10 => cat
# x_6 => gt_3, mul_7, where_3
# Graph fragment:
# %convolution_7 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_6, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_15, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_12, 1e-05), kwargs = {})
# %rsqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_16, 0), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_16, 0.001), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %view_16, %mul_7), kwargs = {})
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%_unsafe_index, %view_18], 1), kwargs = {})
triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_10 = async_compile.triton('triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_10(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 64
x1 = (xindex // 64)
tmp0 = tl.load(in_out_ptr0 + (r2 + (64*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 64.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.001
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(in_out_ptr0 + (r2 + (64*x3)), tmp2, xmask)
tl.store(out_ptr2 + (r2 + (64*x3)), tmp30, xmask)
tl.store(out_ptr3 + (r2 + (64*x0) + (12288*x1)), tmp30, xmask)
tl.store(out_ptr4 + (x3), tmp24, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zq/czqwfdofxuzz35sucicv76svoxvr6bxavlnp23hrvm376giozxl5.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_7 => getitem_14, getitem_15
# Graph fragment:
# %getitem_14 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_15 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_11 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/q6/cq6iqe67v47sxb2twrp6akluleocwor2raq7i7r4msdb4eghjs34.py
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_8 => convolution_8
# Graph fragment:
# %convolution_8 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_14, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_12 = async_compile.triton('triton_poi_fused_convolution_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ju/cjuu4bweldtbcet4dlo4oj4dlgswtzyovjjidvtdgfs7mxl63oao.py
# Topologically Sorted Source Nodes: [conv2d_9, instance_norm_4], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# conv2d_9 => convolution_9
# instance_norm_4 => add_4, rsqrt_4, var_mean_4
# Graph fragment:
# %convolution_9 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_8, %primals_20, %primals_21, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_4 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_20, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_16, 1e-05), kwargs = {})
# %rsqrt_4 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_13 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_13', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_13(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 512
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp23, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
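# Editor's note: reference sketch, editor-added and never called. Kernel 13
# fuses the conv2d_9 bias add with the statistics pass of InstanceNorm: each of
# the 512 (batch, channel) rows reduces its 16 spatial values to a mean and to
# rsqrt(var + 1e-05), with the variance taken as a biased estimate
# (correction=0, matching the var_mean node above). The normalization itself is
# applied by a later kernel that reads these two buffers.
def _ref_instance_norm_stats(x, eps=1e-05):
    # x: (N, C, H, W); returns mean and rsqrt per (n, c) row, each shaped (N*C,)
    flat = x.reshape(x.shape[0] * x.shape[1], -1)
    mean = flat.mean(dim=1)
    var = flat.var(dim=1, unbiased=False)
    return mean, (var + eps).rsqrt()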
# kernel path: runs/run_shard_0/inductor_cache/3a/c3aubsagukfcpdcv6lvaiwj3g2w7n7lrm2yheaopofnv2r7a4j5d.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_9 => add_5, add_6, convert_element_type, convert_element_type_1, iota, mul_10, mul_11
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_10, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_5, torch.float32), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 0.5), kwargs = {})
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_11, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_14 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_14(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
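# Editor's note: reference sketch, editor-added and never called. Kernel 14
# materializes the row/column source-index table for a 2x nearest-neighbour
# upsample to size 8: destination index i maps to source index int(i * 0.5),
# producing [0, 0, 1, 1, 2, 2, 3, 3].
def _ref_nearest_indices(out_size):
    return [int(i * 0.5) for i in range(out_size)]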
# kernel path: runs/run_shard_0/inductor_cache/f7/cf7dthhw32pjvubbyyxwlnsmrcqsx6cz5hlqzjpsm57sygqrsxto.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten._unsafe_index]
# Source node to ATen node mapping:
# x_9 => _unsafe_index
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_23, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {})
triton_poi_fused__unsafe_index_15 = async_compile.triton('triton_poi_fused__unsafe_index_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 8) % 8
x0 = xindex % 8
x5 = (xindex // 64)
x3 = (xindex // 8192)
x6 = xindex % 8192
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x5), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x5), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (4*tmp4) + (16*x5)), None, eviction_policy='evict_last')
tmp11 = tmp9 - tmp10
tmp13 = tmp11 * tmp12
tmp14 = 0.0
tmp15 = tmp13 > tmp14
tmp16 = 0.001
tmp17 = tmp13 * tmp16
tmp18 = tl.where(tmp15, tmp13, tmp17)
tl.store(out_ptr0 + (x6 + (12288*x3)), tmp18, None)
''', device_str='cuda')
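# Editor's note: reference sketch, editor-added and never called. Kernel 15
# fuses three steps: the nearest-neighbour gather (the 4x4 -> 8x8 upsample using
# the index table from kernel 14), the InstanceNorm application
# (x - mean) * rsqrt with the per-row statistics from kernel 13, and a LeakyReLU
# with negative slope 0.001. The output stride of 12288 per sample
# (192 channels * 8 * 8) shows the result lands directly in its slice of the
# skip-connection concatenation buffer, so no separate torch.cat kernel is needed.
def _ref_upsample_norm_lrelu(x, mean, rstd, slope=0.001):
    # x: (N, C, 4, 4); mean, rstd: (N, C) from the statistics kernel
    y = torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')
    y = (y - mean[..., None, None]) * rstd[..., None, None]
    return torch.where(y > 0, y, y * slope)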
# kernel path: runs/run_shard_0/inductor_cache/oy/coywjqjjl6oisptt43ssfqgz5plb7pbnuqonvovyedw3ahbs3ee2.py
# Topologically Sorted Source Nodes: [conv2d_11, instance_norm_5], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# conv2d_11 => convolution_11
# instance_norm_5 => add_9, rsqrt_5, var_mean_5
# Graph fragment:
# %convolution_11 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_5 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_25, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_18, 1e-05), kwargs = {})
# %rsqrt_5 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_9,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_16 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_16', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_16(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (r2 + (64*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + (64*x3)), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp23, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
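# Editor's note: kernels 16-24 repeat the trio established by kernels 13-15
# (bias add + instance-norm statistics, nearest-neighbour index table, fused
# upsample/normalize/LeakyReLU) once per decoder scale: the statistics kernels
# run at 8x8 with 64 channels, 16x16 with 32 channels, and 32x32 with
# 16 channels, and each gather kernel doubles the spatial size while writing
# into the next, larger concatenation buffer.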
# kernel path: runs/run_shard_0/inductor_cache/jy/cjydlz6d2j3sa34jmwpihh7wvfrvefqo3gztcn6coqyocogtsjim.py
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_12 => add_10, add_11, convert_element_type_4, convert_element_type_5, iota_2, mul_16, mul_17
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_16, 0), kwargs = {})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_10, torch.float32), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_11, 0.5), kwargs = {})
# %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_17, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_17 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_17(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/as/casjatkrnf4ifydtmzj3npyhwhnit25odbq4rn6jzoi24ssoek52.py
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten._unsafe_index]
# Source node to ATen node mapping:
# x_12 => _unsafe_index_1
# Graph fragment:
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_28, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {})
triton_poi_fused__unsafe_index_18 = async_compile.triton('triton_poi_fused__unsafe_index_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x5 = (xindex // 256)
x3 = (xindex // 16384)
x6 = xindex % 16384
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x5), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x5), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (8*tmp4) + (64*x5)), None, eviction_policy='evict_last')
tmp11 = tmp9 - tmp10
tmp13 = tmp11 * tmp12
tmp14 = 0.0
tmp15 = tmp13 > tmp14
tmp16 = 0.001
tmp17 = tmp13 * tmp16
tmp18 = tl.where(tmp15, tmp13, tmp17)
tl.store(out_ptr0 + (x6 + (24576*x3)), tmp18, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vh/cvhshvzmyuttgt5xircobicqxklnme7qkcvftaf5jelrqyf5xv2t.py
# Topologically Sorted Source Nodes: [conv2d_13, instance_norm_6], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# conv2d_13 => convolution_13
# instance_norm_6 => add_14, rsqrt_6, var_mean_6
# Graph fragment:
# %convolution_13 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_12, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_6 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_30, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_20, 1e-05), kwargs = {})
# %rsqrt_6 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_14,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_19 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_19', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_19(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (r2 + (256*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 256, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r2 + (256*x3)), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp20, None)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
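# Editor's note: from kernel 19 onward the heuristics report 'no_x_dim': True
# and the kernel pins XBLOCK to 1: each program now handles exactly one
# (batch, channel) row, the [XBLOCK, RBLOCK] tiling collapses to a 1-D RBLOCK
# vector, and the xmask bounds checks vanish because the launch grid covers
# xnumel exactly.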
# kernel path: runs/run_shard_0/inductor_cache/wg/cwgxtc4y3ifd3t7fprczhrjtdxlfclh54p7dwvlgwkaet2cxhnox.py
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_15 => add_15, add_16, convert_element_type_8, convert_element_type_9, iota_4, mul_22, mul_23
# Graph fragment:
# %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_4, 1), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_22, 0), kwargs = {})
# %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_15, torch.float32), kwargs = {})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.0), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_16, 0.5), kwargs = {})
# %convert_element_type_9 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_23, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_20 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_20(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zd/czdtppr7on5ukrbw7oa2vmiq6lcdfvqkkpqhplp234xourcimcur.py
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten._unsafe_index]
# Source node to ATen node mapping:
# x_15 => _unsafe_index_2
# Graph fragment:
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_33, [None, None, %unsqueeze_2, %convert_element_type_9]), kwargs = {})
triton_poi_fused__unsafe_index_21 = async_compile.triton('triton_poi_fused__unsafe_index_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 32
x0 = xindex % 32
x5 = (xindex // 1024)
x3 = (xindex // 32768)
x6 = xindex % 32768
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x5), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x5), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (16*tmp4) + (256*x5)), None, eviction_policy='evict_last')
tmp11 = tmp9 - tmp10
tmp13 = tmp11 * tmp12
tmp14 = 0.0
tmp15 = tmp13 > tmp14
tmp16 = 0.001
tmp17 = tmp13 * tmp16
tmp18 = tl.where(tmp15, tmp13, tmp17)
tl.store(out_ptr0 + (x6 + (49152*x3)), tmp18, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/z3/cz3lcykbprjlamaknn5db2i7ryiolkde6zhvhhzzfx4h6jjztb2g.py
# Topologically Sorted Source Nodes: [conv2d_15, instance_norm_7], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# conv2d_15 => convolution_15
# instance_norm_7 => add_19, rsqrt_7, var_mean_7
# Graph fragment:
# %convolution_15 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_14, %primals_32, %primals_33, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_7 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_35, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_22, 1e-05), kwargs = {})
# %rsqrt_7 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_19,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_22 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_22', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_22(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 64
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (r2 + (1024*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 1024, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 1024.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r2 + (1024*x3)), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp20, None)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/c5/cc5kfslnxxf5yiyrkzekedympw26a26w2725vwdxkct2t2yzm42p.py
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_18 => add_20, add_21, convert_element_type_12, convert_element_type_13, iota_6, mul_28, mul_29
# Graph fragment:
# %iota_6 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_6, 1), kwargs = {})
# %add_20 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_28, 0), kwargs = {})
# %convert_element_type_12 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_20, torch.float32), kwargs = {})
# %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_12, 0.0), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_21, 0.5), kwargs = {})
# %convert_element_type_13 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_29, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_23 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_23(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2r/c2rkoqzf6ty2yq2635wq5e2di4vxcsnrkbgyobrye4uozvduzy2b.py
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten._unsafe_index]
# Source node to ATen node mapping:
# x_18 => _unsafe_index_3
# Graph fragment:
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_38, [None, None, %unsqueeze_3, %convert_element_type_13]), kwargs = {})
triton_poi_fused__unsafe_index_24 = async_compile.triton('triton_poi_fused__unsafe_index_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_24(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x5 = (xindex // 4096)
x3 = (xindex // 65536)
x6 = xindex % 65536
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x5), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x5), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (32*tmp4) + (1024*x5)), None, eviction_policy='evict_last')
tmp11 = tmp9 - tmp10
tmp13 = tmp11 * tmp12
tmp14 = 0.0
tmp15 = tmp13 > tmp14
tmp16 = 0.001
tmp17 = tmp13 * tmp16
tmp18 = tl.where(tmp15, tmp13, tmp17)
tl.store(out_ptr0 + (x6 + (98304*x3)), tmp18, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/34/c34veqnzsffwucaocbian7dhsieb665y6ii52iajzdtbffi67rza.py
# Topologically Sorted Source Nodes: [conv2d_17, instance_norm_8, x_20], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_17 => convolution_17
# instance_norm_8 => add_24, rsqrt_8, var_mean_8
# x_20 => gt_8, mul_33, where_8
# Graph fragment:
# %convolution_17 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_16, %primals_36, %primals_37, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_8 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_40, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_24 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_24, 1e-05), kwargs = {})
# %rsqrt_8 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_24,), kwargs = {})
# %gt_8 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_41, 0), kwargs = {})
# %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_41, 0.001), kwargs = {})
# %where_8 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %view_41, %mul_33), kwargs = {})
triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_25 = async_compile.triton('triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[32, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_25', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_25(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 32
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 8
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce(
tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + (4096*x3)), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(
tmp4_mean, tmp4_m2, tmp4_weight, 1
)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tl.store(out_ptr0 + (x3), tmp4, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp7 = tl.load(in_out_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp8 = tmp7 - tmp4
tmp9 = 4096.0
tmp10 = tmp5 / tmp9
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tmp13 = libdevice.rsqrt(tmp12)
tmp14 = tmp8 * tmp13
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = 0.001
tmp18 = tmp14 * tmp17
tmp19 = tl.where(tmp16, tmp14, tmp18)
tl.store(out_ptr2 + (r2 + (4096*x3)), tmp19, rmask & xmask)
tmp20 = 4096.0
tmp21 = tmp5 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tl.store(out_ptr3 + (x3), tmp24, xmask)
''', device_str='cuda')
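# Editor's note: reference sketch, editor-added and never called. At the 64x64
# scale each (batch, channel) row holds 4096 values, too many for one persistent
# block, so kernel 25 switches to a looped reduction that accumulates mean and
# M2 with Welford's algorithm (triton_helpers.welford_reduce) and then makes a
# second pass to apply (x - mean) * rsqrt(var + 1e-05) and the 0.001-slope
# LeakyReLU. The streaming update it relies on is:
def _ref_welford(values):
    # one-pass mean/variance; m2 / count is the biased (correction=0) variance
    mean, m2, count = 0.0, 0.0, 0
    for v in values:
        count += 1
        delta = v - mean
        mean += delta / count
        m2 += delta * (v - mean)
    return mean, m2 / count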
# kernel path: runs/run_shard_0/inductor_cache/b5/cb5rpjyp43cqaxtedd72lbg7klkcx7kxvnueehz4irdta5tihg6w.py
# Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_18 => convolution_18
# Graph fragment:
# %convolution_18 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%view_43, %primals_38, %primals_39, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_26 = async_compile.triton('triton_poi_fused_convolution_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_26', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_26(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, None)
''', device_str='cuda')
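# Editor's note: reference sketch, editor-added and never called. Kernel 26 is
# the bias epilogue for conv2d_18, whose single output channel (primals_38 has
# shape (1, 8, 3, 3)) reduces the bias to one scalar: tl.load(in_ptr0 + (0))
# reads that value once and broadcasts it across all 4*1*64*64 = 16384 elements.
def _ref_scalar_bias(conv_out, bias):
    # bias: (1,) tensor; equivalent to the broadcast add performed above
    return conv_out + bias[0]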
# kernel path: runs/run_shard_0/inductor_cache/xo/cxomx7hetfbcos4prg63lvvjnowlmjg473kz6d4g7zxa4t2htmmp.py
# Topologically Sorted Source Nodes: [conv2d_19, instance_norm_9, conv2d_21, instance_norm_10, x_23], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.add]
# Source node to ATen node mapping:
# conv2d_19 => convolution_19
# conv2d_21 => convolution_21
# instance_norm_10 => add_26, rsqrt_10, var_mean_10
# instance_norm_9 => add_25, rsqrt_9, var_mean_9
# x_23 => add_27
# Graph fragment:
# %convolution_19 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_18, %primals_40, %primals_41, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_9 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_45, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_26, 1e-05), kwargs = {})
# %rsqrt_9 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_25,), kwargs = {})
# %convolution_21 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_20, %primals_44, %primals_45, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_10 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_50, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_28, 1e-05), kwargs = {})
# %rsqrt_10 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_26,), kwargs = {})
# %add_27 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_48, %view_53), kwargs = {})
triton_red_fused__native_batch_norm_legit_add_convolution_27 = async_compile.triton('triton_red_fused__native_batch_norm_legit_add_convolution_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[4, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_add_convolution_27', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2', 'in_out_ptr3'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_add_convolution_27(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 4
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_out_ptr0 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp3 = tmp0 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp5_mean_next, tmp5_m2_next, tmp5_weight_next = triton_helpers.welford_reduce(
tmp4, tmp5_mean, tmp5_m2, tmp5_weight, roffset == 0
)
tmp5_mean = tl.where(rmask & xmask, tmp5_mean_next, tmp5_mean)
tmp5_m2 = tl.where(rmask & xmask, tmp5_m2_next, tmp5_m2)
tmp5_weight = tl.where(rmask & xmask, tmp5_weight_next, tmp5_weight)
tl.store(in_out_ptr0 + (r1 + (4096*x0)), tmp3, rmask & xmask)
tmp5_tmp, tmp6_tmp, tmp7_tmp = triton_helpers.welford(
tmp5_mean, tmp5_m2, tmp5_weight, 1
)
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tmp7 = tmp7_tmp[:, None]
tl.store(out_ptr0 + (x0), tmp5, xmask)
tmp8 = 4096.0
tmp9 = tmp6 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp12, xmask)
tmp14 = tl.load(in_ptr1 + (0))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp18_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp18_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp18_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp13 = tl.load(in_out_ptr2 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp13 + tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp18_mean_next, tmp18_m2_next, tmp18_weight_next = triton_helpers.welford_reduce(
tmp17, tmp18_mean, tmp18_m2, tmp18_weight, roffset == 0
)
tmp18_mean = tl.where(rmask & xmask, tmp18_mean_next, tmp18_mean)
tmp18_m2 = tl.where(rmask & xmask, tmp18_m2_next, tmp18_m2)
tmp18_weight = tl.where(rmask & xmask, tmp18_weight_next, tmp18_weight)
tl.store(in_out_ptr2 + (r1 + (4096*x0)), tmp16, rmask & xmask)
tmp18_tmp, tmp19_tmp, tmp20_tmp = triton_helpers.welford(
tmp18_mean, tmp18_m2, tmp18_weight, 1
)
tmp18 = tmp18_tmp[:, None]
tmp19 = tmp19_tmp[:, None]
tmp20 = tmp20_tmp[:, None]
tl.store(out_ptr1 + (x0), tmp18, xmask)
tmp21 = 4096.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tl.debug_barrier()
tl.store(in_out_ptr3 + (x0), tmp25, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp26 = tl.load(in_out_ptr0 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp34 = tl.load(in_out_ptr2 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp27 = tmp26 - tmp5
tmp28 = tmp27 * tmp12
tmp29 = 0.0
tmp30 = tmp28 > tmp29
tmp31 = 0.001
tmp32 = tmp28 * tmp31
tmp33 = tl.where(tmp30, tmp28, tmp32)
tmp35 = tmp34 - tmp18
tmp36 = tmp35 * tmp25
tmp37 = tmp36 > tmp29
tmp38 = tmp36 * tmp31
tmp39 = tl.where(tmp37, tmp36, tmp38)
tmp40 = tmp33 + tmp39
tl.store(out_ptr2 + (r1 + (4096*x0)), tmp40, rmask & xmask)
''', device_str='cuda')
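# Editor's note: reference sketch, editor-added and never called. Kernel 27 is
# the fused tail of the network: the two single-channel branches (conv2d_19 and
# conv2d_21) are each bias-added, instance-normalized per sample (xnumel=4 rows
# of 4096 values), passed through the same 0.001-slope LeakyReLU, and summed
# into x_23. A behaviourally equivalent eager sketch:
def _ref_fused_tail(branch_a, bias_a, branch_b, bias_b, slope=0.001, eps=1e-05):
    def _norm_lrelu(x, bias):
        x = x + bias.view(1, -1, 1, 1)
        y = torch.nn.functional.instance_norm(x, eps=eps)
        return torch.where(y > 0, y, y * slope)
    return _norm_lrelu(branch_a, bias_a) + _norm_lrelu(branch_b, bias_b)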
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (8, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (8, ), (1, ))
assert_size_stride(primals_4, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (8, ), (1, ))
assert_size_stride(primals_6, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (16, ), (1, ))
assert_size_stride(primals_10, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (32, ), (1, ))
assert_size_stride(primals_14, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_15, (64, ), (1, ))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64, ), (1, ))
assert_size_stride(primals_18, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (128, ), (1, ))
assert_size_stride(primals_20, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_21, (128, ), (1, ))
assert_size_stride(primals_22, (64, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_23, (64, ), (1, ))
assert_size_stride(primals_24, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_25, (64, ), (1, ))
assert_size_stride(primals_26, (32, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_27, (32, ), (1, ))
assert_size_stride(primals_28, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_29, (32, ), (1, ))
assert_size_stride(primals_30, (16, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_31, (16, ), (1, ))
assert_size_stride(primals_32, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_33, (16, ), (1, ))
assert_size_stride(primals_34, (8, 24, 3, 3), (216, 9, 3, 1))
assert_size_stride(primals_35, (8, ), (1, ))
assert_size_stride(primals_36, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_37, (8, ), (1, ))
assert_size_stride(primals_38, (1, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_39, (1, ), (1, ))
assert_size_stride(primals_40, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_41, (1, ), (1, ))
assert_size_stride(primals_42, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_43, (1, ), (1, ))
assert_size_stride(primals_44, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_45, (1, ), (1, ))
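    # Editor's note: the weight shapes asserted above trace a U-Net-style
    # topology: encoder convs double the channel count (8 -> 16 -> 32 -> 64 ->
    # 128), decoder convs consume concatenated skip connections (192 = 128 + 64,
    # 96 = 64 + 32, 48 = 32 + 16, 24 = 16 + 8), and primals_40..primals_45 are
    # the single-channel 3x3 kernels of what appears to be a gated residual
    # head, evaluated by kernel 27.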
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 131072, grid=grid(131072), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
buf8 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1), torch.float32)
buf91 = empty_strided_cuda((4, 24, 64, 64), (98304, 4096, 64, 1), torch.float32)
buf90 = reinterpret_tensor(buf91, (4, 8, 64, 64), (98304, 4096, 64, 1), 65536) # alias
buf7 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, instance_norm, x, x_19], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.cat]
triton_red_fused__native_batch_norm_legit_cat_convolution_leaky_relu_1.run(buf3, primals_5, buf4, buf8, buf90, buf7, 32, 4096, grid=grid(32), stream=stream0)
del primals_5
buf9 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1), torch.float32)
buf10 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_2.run(buf8, buf9, buf10, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf12, primals_7, 65536, grid=grid(65536), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf14 = buf13; del buf13 # reuse
buf15 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf19 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32)
buf79 = empty_strided_cuda((4, 48, 32, 32), (49152, 1024, 32, 1), torch.float32)
buf78 = reinterpret_tensor(buf79, (4, 16, 32, 32), (49152, 1024, 32, 1), 32768) # alias
buf18 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_3, instance_norm_1, x_2, x_16], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.cat]
triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_4.run(buf14, primals_9, buf15, buf19, buf78, buf18, 64, 1024, grid=grid(64), stream=stream0)
del primals_9
buf20 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32)
buf21 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf19, buf20, buf21, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf20, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 32, 16, 16), (8192, 256, 16, 1))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf23, primals_11, 32768, grid=grid(32768), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 32, 16, 16), (8192, 256, 16, 1))
buf25 = buf24; del buf24 # reuse
buf26 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf30 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32)
buf67 = empty_strided_cuda((4, 96, 16, 16), (24576, 256, 16, 1), torch.float32)
buf66 = reinterpret_tensor(buf67, (4, 32, 16, 16), (24576, 256, 16, 1), 16384) # alias
buf29 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_5, instance_norm_2, x_4, x_13], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.cat]
triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_7.run(buf25, primals_13, buf26, buf30, buf66, buf29, 128, 256, grid=grid(128), stream=stream0)
del primals_13
buf31 = empty_strided_cuda((4, 32, 8, 8), (2048, 64, 8, 1), torch.float32)
buf32 = empty_strided_cuda((4, 32, 8, 8), (2048, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf30, buf31, buf32, 8192, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf33 = extern_kernels.convolution(buf31, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 64, 8, 8), (4096, 64, 8, 1))
buf34 = buf33; del buf33 # reuse
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
triton_poi_fused_convolution_9.run(buf34, primals_15, 16384, grid=grid(16384), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf35 = extern_kernels.convolution(buf34, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 64, 8, 8), (4096, 64, 8, 1))
buf36 = buf35; del buf35 # reuse
buf37 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf41 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.float32)
buf55 = empty_strided_cuda((4, 192, 8, 8), (12288, 64, 8, 1), torch.float32)
buf54 = reinterpret_tensor(buf55, (4, 64, 8, 8), (12288, 64, 8, 1), 8192) # alias
buf40 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_7, instance_norm_3, x_6, x_10], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.cat]
triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_10.run(buf36, primals_17, buf37, buf41, buf54, buf40, 256, 64, grid=grid(256), stream=stream0)
del primals_17
buf42 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.float32)
buf43 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_11.run(buf41, buf42, buf43, 4096, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf44 = extern_kernels.convolution(buf42, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 128, 4, 4), (2048, 16, 4, 1))
buf45 = buf44; del buf44 # reuse
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
triton_poi_fused_convolution_12.run(buf45, primals_19, 8192, grid=grid(8192), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf45, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 128, 4, 4), (2048, 16, 4, 1))
buf47 = buf46; del buf46 # reuse
buf48 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf49 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf51 = reinterpret_tensor(buf49, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf49 # reuse
# Topologically Sorted Source Nodes: [conv2d_9, instance_norm_4], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_13.run(buf47, buf51, primals_21, buf48, 512, 16, grid=grid(512), stream=stream0)
del primals_21
buf52 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_14.run(buf52, 8, grid=grid(8), stream=stream0)
buf53 = reinterpret_tensor(buf55, (4, 128, 8, 8), (12288, 64, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten._unsafe_index]
triton_poi_fused__unsafe_index_15.run(buf52, buf47, buf48, buf51, buf53, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf55, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 64, 8, 8), (4096, 64, 8, 1))
buf57 = buf56; del buf56 # reuse
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
triton_poi_fused_convolution_9.run(buf57, primals_23, 16384, grid=grid(16384), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf58 = extern_kernels.convolution(buf57, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 64, 8, 8), (4096, 64, 8, 1))
buf59 = buf58; del buf58 # reuse
buf60 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch.float32)
buf61 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf63 = reinterpret_tensor(buf61, (1, 256, 1, 1), (256, 1, 1, 1), 0); del buf61 # reuse
# Topologically Sorted Source Nodes: [conv2d_11, instance_norm_5], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_16.run(buf59, buf63, primals_25, buf60, 256, 64, grid=grid(256), stream=stream0)
del primals_25
buf64 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_17.run(buf64, 16, grid=grid(16), stream=stream0)
buf65 = reinterpret_tensor(buf67, (4, 64, 16, 16), (24576, 256, 16, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten._unsafe_index]
triton_poi_fused__unsafe_index_18.run(buf64, buf59, buf60, buf63, buf65, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf68 = extern_kernels.convolution(buf67, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 32, 16, 16), (8192, 256, 16, 1))
buf69 = buf68; del buf68 # reuse
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf69, primals_27, 32768, grid=grid(32768), stream=stream0)
del primals_27
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
buf70 = extern_kernels.convolution(buf69, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 32, 16, 16), (8192, 256, 16, 1))
buf71 = buf70; del buf70 # reuse
buf72 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32)
buf73 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf75 = reinterpret_tensor(buf73, (1, 128, 1, 1), (128, 1, 1, 1), 0); del buf73 # reuse
# Topologically Sorted Source Nodes: [conv2d_13, instance_norm_6], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_19.run(buf71, buf75, primals_29, buf72, 128, 256, grid=grid(128), stream=stream0)
del primals_29
buf76 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_20.run(buf76, 32, grid=grid(32), stream=stream0)
buf77 = reinterpret_tensor(buf79, (4, 32, 32, 32), (49152, 1024, 32, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten._unsafe_index]
triton_poi_fused__unsafe_index_21.run(buf76, buf71, buf72, buf75, buf77, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
buf80 = extern_kernels.convolution(buf79, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf80, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf81 = buf80; del buf80 # reuse
# Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf81, primals_31, 65536, grid=grid(65536), stream=stream0)
del primals_31
# Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution]
buf82 = extern_kernels.convolution(buf81, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf82, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf83 = buf82; del buf82 # reuse
buf84 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 1, 1), torch.float32)
buf85 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf87 = reinterpret_tensor(buf85, (1, 64, 1, 1), (64, 1, 1, 1), 0); del buf85 # reuse
# Topologically Sorted Source Nodes: [conv2d_15, instance_norm_7], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_22.run(buf83, buf87, primals_33, buf84, 64, 1024, grid=grid(64), stream=stream0)
del primals_33
buf88 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_23.run(buf88, 64, grid=grid(64), stream=stream0)
buf89 = reinterpret_tensor(buf91, (4, 16, 64, 64), (98304, 4096, 64, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten._unsafe_index]
triton_poi_fused__unsafe_index_24.run(buf88, buf83, buf84, buf87, buf89, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution]
buf92 = extern_kernels.convolution(buf91, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf92, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf93 = buf92; del buf92 # reuse
# Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf93, primals_35, 131072, grid=grid(131072), stream=stream0)
del primals_35
# Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution]
buf94 = extern_kernels.convolution(buf93, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf94, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf95 = buf94; del buf94 # reuse
buf96 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
buf100 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1), torch.float32)
buf99 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_17, instance_norm_8, x_20], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu]
triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_25.run(buf95, primals_37, buf96, buf100, buf99, 32, 4096, grid=grid(32), stream=stream0)
del primals_37
# Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution]
buf101 = extern_kernels.convolution(buf100, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf102 = buf101; del buf101 # reuse
# Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution]
triton_poi_fused_convolution_26.run(buf102, primals_39, 16384, grid=grid(16384), stream=stream0)
del primals_39
# Topologically Sorted Source Nodes: [conv2d_19], Original ATen: [aten.convolution]
buf103 = extern_kernels.convolution(buf102, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf103, (4, 1, 64, 64), (4096, 4096, 64, 1))
# Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution]
buf109 = extern_kernels.convolution(primals_1, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf110 = buf109; del buf109 # reuse
# Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution]
triton_poi_fused_convolution_26.run(buf110, primals_43, 16384, grid=grid(16384), stream=stream0)
del primals_43
# Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution]
buf111 = extern_kernels.convolution(buf110, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf111, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf104 = buf103; del buf103 # reuse
buf105 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf106 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf108 = reinterpret_tensor(buf106, (1, 4, 1, 1), (4, 1, 1, 1), 0); del buf106 # reuse
buf112 = buf111; del buf111 # reuse
buf113 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf114 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf116 = reinterpret_tensor(buf114, (1, 4, 1, 1), (4, 1, 1, 1), 0); del buf114 # reuse
buf117 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_19, instance_norm_9, conv2d_21, instance_norm_10, x_23], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.add]
triton_red_fused__native_batch_norm_legit_add_convolution_27.run(buf104, buf108, buf112, buf116, primals_41, primals_45, buf105, buf113, buf117, 4, 4096, grid=grid(4), stream=stream0)
del primals_41
del primals_45
return (buf117, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, buf1, buf3, reinterpret_tensor(buf7, (32, ), (1, ), 0), buf8, buf9, buf10, buf12, buf14, reinterpret_tensor(buf18, (64, ), (1, ), 0), buf19, buf20, buf21, buf23, buf25, reinterpret_tensor(buf29, (128, ), (1, ), 0), buf30, buf31, buf32, buf34, buf36, reinterpret_tensor(buf40, (256, ), (1, ), 0), buf41, buf42, buf43, buf45, buf47, buf48, buf51, buf52, buf55, buf57, buf59, buf60, buf63, buf64, buf67, buf69, buf71, buf72, buf75, buf76, buf79, buf81, buf83, buf84, buf87, buf88, buf91, buf93, buf95, reinterpret_tensor(buf99, (32, ), (1, ), 0), buf100, buf102, buf104, buf105, buf108, buf110, buf112, buf113, buf116, reinterpret_tensor(buf96, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf37, (1, 256, 1, 1), (256, 1, 1, 1), 0), reinterpret_tensor(buf26, (1, 128, 1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf15, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf4, (1, 32, 1, 1), (32, 1, 1, 1), 0), )
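# Note: everything after buf117 in the tuple above appears to be what
# AOTAutograd stashes for the backward pass, namely the conv weights, the
# intermediate activations, and the per-channel instance-norm statistics
# (reinterpreted back to their logical shapes).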
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((64, 192, 3, 3), (1728, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((32, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((16, 48, 3, 3), (432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((8, 24, 3, 3), (216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((1, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
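# Running this file directly benchmarks the compiled module on random CUDA
# tensors via compiled_module_main / print_performance.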
| import torch
from torch import nn
import torch.nn.functional as F
class BasicConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(BasicConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
padding=1)
self.norm = nn.InstanceNorm2d(out_channels)
def forward(self, x):
x = F.leaky_relu(self.norm(self.conv2(self.conv1(x))),
negative_slope=0.001, inplace=True)
return x
class BasicUNet(nn.Module):
def __init__(self, n_input_channel=1, n_output_channel=1, nic=8,
residual=True):
super().__init__()
self.residual = residual
self.down1 = nn.MaxPool2d(kernel_size=2)
self.down2 = nn.MaxPool2d(kernel_size=2)
self.down3 = nn.MaxPool2d(kernel_size=2)
self.down4 = nn.MaxPool2d(kernel_size=2)
self.up1 = lambda x: nn.functional.interpolate(x, mode='nearest',
scale_factor=2)
self.up2 = lambda x: nn.functional.interpolate(x, mode='nearest',
scale_factor=2)
self.up3 = lambda x: nn.functional.interpolate(x, mode='nearest',
scale_factor=2)
self.up4 = lambda x: nn.functional.interpolate(x, mode='nearest',
scale_factor=2)
self.conv1 = BasicConvBlock(n_input_channel, nic)
self.conv2 = BasicConvBlock(nic, nic * 2)
self.conv3 = BasicConvBlock(nic * 2, nic * 4)
self.conv4 = BasicConvBlock(nic * 4, nic * 8)
self.conv5 = BasicConvBlock(nic * 8, nic * 16)
self.conv6 = BasicConvBlock(nic * 3 * 8, nic * 8)
self.conv7 = BasicConvBlock(nic * 3 * 4, nic * 4)
self.conv8 = BasicConvBlock(nic * 3 * 2, nic * 2)
self.conv9 = BasicConvBlock(nic * 3, nic)
self.conv10 = BasicConvBlock(nic, n_output_channel)
if self.residual:
self.conv11 = BasicConvBlock(n_input_channel, n_output_channel)
def forward(self, x):
c0 = x
c1 = self.conv1(x)
x = self.down1(c1)
c2 = self.conv2(x)
x = self.down2(c2)
c3 = self.conv3(x)
x = self.down3(c3)
c4 = self.conv4(x)
x = self.down4(c4)
x = self.conv5(x)
x = self.up1(x)
x = torch.cat([x, c4], 1)
x = self.conv6(x)
x = self.up2(x)
x = torch.cat([x, c3], 1)
x = self.conv7(x)
x = self.up3(x)
x = torch.cat([x, c2], 1)
x = self.conv8(x)
x = self.up4(x)
x = torch.cat([x, c1], 1)
x = self.conv9(x)
x = self.conv10(x)
if self.residual:
x = torch.add(x, self.conv11(c0))
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
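# A minimal eager-mode sanity check; the helper name `_smoke_test` is ours and
# is not part of the generated file. It builds BasicUNet with the defaults
# implied by the weight shapes asserted above and confirms that the residual
# output keeps the input shape.
def _smoke_test():
    model = BasicUNet(n_input_channel=1, n_output_channel=1, nic=8, residual=True)
    x, = get_inputs()  # (4, 1, 64, 64)
    with torch.no_grad():
        y = model(x)
    # every MaxPool2d/interpolate pair cancels, and conv10/conv11 both emit
    # n_output_channel=1 channels, so shapes must match
    assert y.shape == x.shape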
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 8
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
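# The reduction kernel below fuses four source ops over each (batch, channel)
# plane of 4096 pixels: the conv bias add, instance-norm statistics via a
# looped Welford reduction, leaky_relu with negative_slope=0.001, and a second
# strided store that drops the activated slice straight into the 24-channel
# torch.cat buffer consumed by conv9 (the 98304 * x1 term is 24 * 4096 per
# batch element).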
@triton.jit
def triton_red_fused__native_batch_norm_legit_cat_convolution_leaky_relu_1(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 32
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 8
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
        tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce(
            tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + 4096 * x3), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6_tmp[:, None]
tl.store(out_ptr0 + x3, tmp4, xmask)
x1 = xindex // 8
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp7 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp8 = tmp7 - tmp4
tmp9 = 4096.0
tmp10 = tmp5 / tmp9
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tmp13 = libdevice.rsqrt(tmp12)
tmp14 = tmp8 * tmp13
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = 0.001
tmp18 = tmp14 * tmp17
tmp19 = tl.where(tmp16, tmp14, tmp18)
tl.store(out_ptr2 + (r2 + 4096 * x3), tmp19, rmask & xmask)
        tl.store(out_ptr3 + (r2 + 4096 * x0 + 98304 * x1), tmp19, rmask & xmask)
tmp20 = 4096.0
tmp21 = tmp5 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tl.store(out_ptr4 + x3, tmp24, xmask)
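# 2x2 max pooling with argmax bookkeeping: the four loads below cover one
# pooling window of a 64-wide input (offsets 0/1 in one row, 64/65 in the
# next), and the int8 output records which of the four positions won, as
# needed by max_pool2d_with_indices for the backward pass.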
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_4(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + (r2 + 1024 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 1024, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = tmp2 - tmp10
tmp17 = 1024.0
tmp18 = tmp15 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp16 * tmp21
tmp23 = 0.0
tmp24 = tmp22 > tmp23
tmp25 = 0.001
tmp26 = tmp22 * tmp25
tmp27 = tl.where(tmp24, tmp22, tmp26)
tl.store(in_out_ptr0 + (r2 + 1024 * x3), tmp2, None)
tl.store(out_ptr2 + (r2 + 1024 * x3), tmp27, None)
tl.store(out_ptr3 + (r2 + 1024 * x0 + 49152 * x1), tmp27, None)
tl.store(out_ptr4 + x3, tmp21, None)
tl.store(out_ptr0 + x3, tmp10, None)
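# Inductor emits two reduction shapes in this file: "triton_red_" kernels loop
# over a plane with a Welford accumulator when it is too large for one block,
# while "triton_per_" kernels like the one above are persistent reductions that
# hold the whole plane (here RBLOCK = 1024) in registers and derive the mean
# and variance from plain tl.sum calls.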
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_7(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 32
x1 = xindex // 32
tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 256, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = tmp2 - tmp10
tmp17 = 256.0
tmp18 = tmp15 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp16 * tmp21
tmp23 = 0.0
tmp24 = tmp22 > tmp23
tmp25 = 0.001
tmp26 = tmp22 * tmp25
tmp27 = tl.where(tmp24, tmp22, tmp26)
tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None)
tl.store(out_ptr2 + (r2 + 256 * x3), tmp27, None)
tl.store(out_ptr3 + (r2 + 256 * x0 + 24576 * x1), tmp27, None)
tl.store(out_ptr4 + x3, tmp21, None)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_10(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + (r2 + 64 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 64.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.001
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(in_out_ptr0 + (r2 + 64 * x3), tmp2, xmask)
tl.store(out_ptr2 + (r2 + 64 * x3), tmp30, xmask)
tl.store(out_ptr3 + (r2 + 64 * x0 + 12288 * x1), tmp30, xmask)
tl.store(out_ptr4 + x3, tmp24, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_13(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 512
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
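# The next two kernels implement the model's nearest-neighbor 2x upsample:
# kernel 14 materializes the source-index map int(i * 0.5) for the 8-wide
# output grid, and kernel 15 gathers through it while fusing the instance-norm
# normalization ((x - mean) * rsqrt(var + eps)) and the leaky_relu into the
# same pass, writing directly into the 192-channel concat buffer for conv6.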
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_14(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 8 % 8
x0 = xindex % 8
x5 = xindex // 64
x3 = xindex // 8192
x6 = xindex % 8192
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x5, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x5, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 - tmp10
tmp13 = tmp11 * tmp12
tmp14 = 0.0
tmp15 = tmp13 > tmp14
tmp16 = 0.001
tmp17 = tmp13 * tmp16
tmp18 = tl.where(tmp15, tmp13, tmp17)
tl.store(out_ptr0 + (x6 + 12288 * x3), tmp18, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_16(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (r2 + 64 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + 64 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_17(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x5 = xindex // 256
x3 = xindex // 16384
x6 = xindex % 16384
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x5, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x5, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 - tmp10
tmp13 = tmp11 * tmp12
tmp14 = 0.0
tmp15 = tmp13 > tmp14
tmp16 = 0.001
tmp17 = tmp13 * tmp16
tmp18 = tl.where(tmp15, tmp13, tmp17)
tl.store(out_ptr0 + (x6 + 24576 * x3), tmp18, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_19(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 256, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp20, None)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_20(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 32
x0 = xindex % 32
x5 = xindex // 1024
x3 = xindex // 32768
x6 = xindex % 32768
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x5, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x5, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 - tmp10
tmp13 = tmp11 * tmp12
tmp14 = 0.0
tmp15 = tmp13 > tmp14
tmp16 = 0.001
tmp17 = tmp13 * tmp16
tmp18 = tl.where(tmp15, tmp13, tmp17)
tl.store(out_ptr0 + (x6 + 49152 * x3), tmp18, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_22(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (r2 + 1024 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 1024, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 1024.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r2 + 1024 * x3), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp20, None)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_23(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_24(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x5 = xindex // 4096
x3 = xindex // 65536
x6 = xindex % 65536
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x5, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x5, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 - tmp10
tmp13 = tmp11 * tmp12
tmp14 = 0.0
tmp15 = tmp13 > tmp14
tmp16 = 0.001
tmp17 = tmp13 * tmp16
tmp18 = tl.where(tmp15, tmp13, tmp17)
tl.store(out_ptr0 + (x6 + 98304 * x3), tmp18, None)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_25(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 32
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 8
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
        tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce(
            tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + 4096 * x3), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6_tmp[:, None]
tl.store(out_ptr0 + x3, tmp4, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp7 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp8 = tmp7 - tmp4
tmp9 = 4096.0
tmp10 = tmp5 / tmp9
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tmp13 = libdevice.rsqrt(tmp12)
tmp14 = tmp8 * tmp13
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = 0.001
tmp18 = tmp14 * tmp17
tmp19 = tl.where(tmp16, tmp14, tmp18)
tl.store(out_ptr2 + (r2 + 4096 * x3), tmp19, rmask & xmask)
tmp20 = 4096.0
tmp21 = tmp5 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tl.store(out_ptr3 + x3, tmp24, xmask)
@triton.jit
def triton_poi_fused_convolution_26(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
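# Final fused kernel: three passes over the 4096 pixels of each image. The
# first two loops accumulate Welford statistics for the main decoder branch
# (in_out_ptr0) and the residual branch (in_out_ptr2) while folding in the
# conv biases; the third normalizes both, applies leaky_relu, and stores their
# sum, i.e. torch.add(x, self.conv11(c0)) from the reference model.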
@triton.jit
def triton_red_fused__native_batch_norm_legit_add_convolution_27(in_out_ptr0,
in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 4
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_out_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp3 = tmp0 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
        tmp5_mean_next, tmp5_m2_next, tmp5_weight_next = triton_helpers.welford_reduce(
            tmp4, tmp5_mean, tmp5_m2, tmp5_weight, roffset == 0)
tmp5_mean = tl.where(rmask & xmask, tmp5_mean_next, tmp5_mean)
tmp5_m2 = tl.where(rmask & xmask, tmp5_m2_next, tmp5_m2)
tmp5_weight = tl.where(rmask & xmask, tmp5_weight_next, tmp5_weight)
tl.store(in_out_ptr0 + (r1 + 4096 * x0), tmp3, rmask & xmask)
tmp5_tmp, tmp6_tmp, tmp7_tmp = triton_helpers.welford(tmp5_mean,
tmp5_m2, tmp5_weight, 1)
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tmp7_tmp[:, None]
tl.store(out_ptr0 + x0, tmp5, xmask)
tmp8 = 4096.0
tmp9 = tmp6 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp12, xmask)
tmp14 = tl.load(in_ptr1 + 0)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp18_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp18_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp18_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp13 = tl.load(in_out_ptr2 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp13 + tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp18_mean_next, tmp18_m2_next, tmp18_weight_next = (triton_helpers
.welford_reduce(tmp17, tmp18_mean, tmp18_m2, tmp18_weight,
roffset == 0))
tmp18_mean = tl.where(rmask & xmask, tmp18_mean_next, tmp18_mean)
tmp18_m2 = tl.where(rmask & xmask, tmp18_m2_next, tmp18_m2)
tmp18_weight = tl.where(rmask & xmask, tmp18_weight_next, tmp18_weight)
tl.store(in_out_ptr2 + (r1 + 4096 * x0), tmp16, rmask & xmask)
tmp18_tmp, tmp19_tmp, tmp20_tmp = triton_helpers.welford(tmp18_mean,
tmp18_m2, tmp18_weight, 1)
tmp18 = tmp18_tmp[:, None]
tmp19 = tmp19_tmp[:, None]
tmp20_tmp[:, None]
tl.store(out_ptr1 + x0, tmp18, xmask)
tmp21 = 4096.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tl.debug_barrier()
tl.store(in_out_ptr3 + x0, tmp25, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp26 = tl.load(in_out_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp34 = tl.load(in_out_ptr2 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp27 = tmp26 - tmp5
tmp28 = tmp27 * tmp12
tmp29 = 0.0
tmp30 = tmp28 > tmp29
tmp31 = 0.001
tmp32 = tmp28 * tmp31
tmp33 = tl.where(tmp30, tmp28, tmp32)
tmp35 = tmp34 - tmp18
tmp36 = tmp35 * tmp25
tmp37 = tmp36 > tmp29
tmp38 = tmp36 * tmp31
tmp39 = tl.where(tmp37, tmp36, tmp38)
tmp40 = tmp33 + tmp39
tl.store(out_ptr2 + (r1 + 4096 * x0), tmp40, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (8, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (32,), (1,))
assert_size_stride(primals_14, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (128,), (1,))
assert_size_stride(primals_20, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_21, (128,), (1,))
assert_size_stride(primals_22, (64, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_23, (64,), (1,))
assert_size_stride(primals_24, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_25, (64,), (1,))
assert_size_stride(primals_26, (32, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_27, (32,), (1,))
assert_size_stride(primals_28, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_29, (32,), (1,))
assert_size_stride(primals_30, (16, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_31, (16,), (1,))
assert_size_stride(primals_32, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_33, (16,), (1,))
assert_size_stride(primals_34, (8, 24, 3, 3), (216, 9, 3, 1))
assert_size_stride(primals_35, (8,), (1,))
assert_size_stride(primals_36, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_37, (8,), (1,))
assert_size_stride(primals_38, (1, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_39, (1,), (1,))
assert_size_stride(primals_40, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_41, (1,), (1,))
assert_size_stride(primals_42, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_43, (1,), (1,))
assert_size_stride(primals_44, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_45, (1,), (1,))
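    # The buffers below trace the BasicUNet defined at the end of this file:
    # conv1..conv5 with 2x2 max-pool downsampling, then nearest 2x upsampling
    # with channel concatenation (buf55, buf67, buf79, buf91) through
    # conv6..conv10, plus the residual conv11 branch on the raw input, fused
    # into the final batch-norm + add kernel that produces buf117.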
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(131072)](buf1, primals_3,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32
)
buf8 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1),
torch.float32)
buf91 = empty_strided_cuda((4, 24, 64, 64), (98304, 4096, 64, 1),
torch.float32)
buf90 = reinterpret_tensor(buf91, (4, 8, 64, 64), (98304, 4096, 64,
1), 65536)
buf7 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32
)
triton_red_fused__native_batch_norm_legit_cat_convolution_leaky_relu_1[
grid(32)](buf3, primals_5, buf4, buf8, buf90, buf7, 32, 4096,
XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1),
torch.float32)
buf10 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_2[grid(32768)](buf8, buf9,
buf10, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf11 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf12 = buf11
del buf11
triton_poi_fused_convolution_3[grid(65536)](buf12, primals_7, 65536,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf13 = extern_kernels.convolution(buf12, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf14 = buf13
del buf13
buf15 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
buf19 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.float32)
buf79 = empty_strided_cuda((4, 48, 32, 32), (49152, 1024, 32, 1),
torch.float32)
buf78 = reinterpret_tensor(buf79, (4, 16, 32, 32), (49152, 1024, 32,
1), 32768)
buf18 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_4[
grid(64)](buf14, primals_9, buf15, buf19, buf78, buf18, 64,
1024, num_warps=8, num_stages=1)
del primals_9
buf20 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.float32)
buf21 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(16384)](buf19,
buf20, buf21, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf22 = extern_kernels.convolution(buf20, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 32, 16, 16), (8192, 256, 16, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_6[grid(32768)](buf23, primals_11,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf24 = extern_kernels.convolution(buf23, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 32, 16, 16), (8192, 256, 16, 1))
buf25 = buf24
del buf24
buf26 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf30 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.float32)
buf67 = empty_strided_cuda((4, 96, 16, 16), (24576, 256, 16, 1),
torch.float32)
buf66 = reinterpret_tensor(buf67, (4, 32, 16, 16), (24576, 256, 16,
1), 16384)
buf29 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_7[
grid(128)](buf25, primals_13, buf26, buf30, buf66, buf29, 128,
256, num_warps=2, num_stages=1)
del primals_13
buf31 = empty_strided_cuda((4, 32, 8, 8), (2048, 64, 8, 1), torch.
float32)
buf32 = empty_strided_cuda((4, 32, 8, 8), (2048, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(8192)](buf30, buf31,
buf32, 8192, XBLOCK=128, num_warps=4, num_stages=1)
buf33 = extern_kernels.convolution(buf31, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 64, 8, 8), (4096, 64, 8, 1))
buf34 = buf33
del buf33
triton_poi_fused_convolution_9[grid(16384)](buf34, primals_15,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf35 = extern_kernels.convolution(buf34, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 64, 8, 8), (4096, 64, 8, 1))
buf36 = buf35
del buf35
buf37 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf41 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.
float32)
buf55 = empty_strided_cuda((4, 192, 8, 8), (12288, 64, 8, 1), torch
.float32)
buf54 = reinterpret_tensor(buf55, (4, 64, 8, 8), (12288, 64, 8, 1),
8192)
buf40 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused__native_batch_norm_legit_cat_convolution_leaky_relu_10[
grid(256)](buf36, primals_17, buf37, buf41, buf54, buf40, 256,
64, XBLOCK=8, num_warps=4, num_stages=1)
del primals_17
buf42 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
buf43 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_11[grid(4096)](buf41,
buf42, buf43, 4096, XBLOCK=128, num_warps=4, num_stages=1)
buf44 = extern_kernels.convolution(buf42, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 128, 4, 4), (2048, 16, 4, 1))
buf45 = buf44
del buf44
triton_poi_fused_convolution_12[grid(8192)](buf45, primals_19, 8192,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf46 = extern_kernels.convolution(buf45, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 128, 4, 4), (2048, 16, 4, 1))
buf47 = buf46
del buf46
buf48 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf49 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf51 = reinterpret_tensor(buf49, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf49
triton_per_fused__native_batch_norm_legit_convolution_13[grid(512)](
buf47, buf51, primals_21, buf48, 512, 16, XBLOCK=32, num_warps=
4, num_stages=1)
del primals_21
buf52 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_14[grid(8)](buf52, 8,
XBLOCK=8, num_warps=1, num_stages=1)
buf53 = reinterpret_tensor(buf55, (4, 128, 8, 8), (12288, 64, 8, 1), 0)
triton_poi_fused__unsafe_index_15[grid(32768)](buf52, buf47, buf48,
buf51, buf53, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf56 = extern_kernels.convolution(buf55, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 64, 8, 8), (4096, 64, 8, 1))
buf57 = buf56
del buf56
triton_poi_fused_convolution_9[grid(16384)](buf57, primals_23,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf58 = extern_kernels.convolution(buf57, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 64, 8, 8), (4096, 64, 8, 1))
buf59 = buf58
del buf58
buf60 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch.
float32)
buf61 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf63 = reinterpret_tensor(buf61, (1, 256, 1, 1), (256, 1, 1, 1), 0)
del buf61
triton_per_fused__native_batch_norm_legit_convolution_16[grid(256)](
buf59, buf63, primals_25, buf60, 256, 64, XBLOCK=8, num_warps=4,
num_stages=1)
del primals_25
buf64 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_17[grid(16)](buf64, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf65 = reinterpret_tensor(buf67, (4, 64, 16, 16), (24576, 256, 16,
1), 0)
triton_poi_fused__unsafe_index_18[grid(65536)](buf64, buf59, buf60,
buf63, buf65, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf68 = extern_kernels.convolution(buf67, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 32, 16, 16), (8192, 256, 16, 1))
buf69 = buf68
del buf68
triton_poi_fused_convolution_6[grid(32768)](buf69, primals_27,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_27
buf70 = extern_kernels.convolution(buf69, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 32, 16, 16), (8192, 256, 16, 1))
buf71 = buf70
del buf70
buf72 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.
float32)
buf73 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf75 = reinterpret_tensor(buf73, (1, 128, 1, 1), (128, 1, 1, 1), 0)
del buf73
triton_per_fused__native_batch_norm_legit_convolution_19[grid(128)](
buf71, buf75, primals_29, buf72, 128, 256, num_warps=2,
num_stages=1)
del primals_29
buf76 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_20[grid(32)](buf76, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf77 = reinterpret_tensor(buf79, (4, 32, 32, 32), (49152, 1024, 32,
1), 0)
triton_poi_fused__unsafe_index_21[grid(131072)](buf76, buf71, buf72,
buf75, buf77, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf80 = extern_kernels.convolution(buf79, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf80, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf81 = buf80
del buf80
triton_poi_fused_convolution_3[grid(65536)](buf81, primals_31,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_31
buf82 = extern_kernels.convolution(buf81, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf82, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf83 = buf82
del buf82
buf84 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 1, 1), torch.float32)
buf85 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
buf87 = reinterpret_tensor(buf85, (1, 64, 1, 1), (64, 1, 1, 1), 0)
del buf85
triton_per_fused__native_batch_norm_legit_convolution_22[grid(64)](
buf83, buf87, primals_33, buf84, 64, 1024, num_warps=8,
num_stages=1)
del primals_33
buf88 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_23[grid(64)](buf88, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf89 = reinterpret_tensor(buf91, (4, 16, 64, 64), (98304, 4096, 64,
1), 0)
triton_poi_fused__unsafe_index_24[grid(262144)](buf88, buf83, buf84,
buf87, buf89, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf92 = extern_kernels.convolution(buf91, primals_34, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf92, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf93 = buf92
del buf92
triton_poi_fused_convolution_0[grid(131072)](buf93, primals_35,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_35
buf94 = extern_kernels.convolution(buf93, primals_36, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf94, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf95 = buf94
del buf94
buf96 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
buf100 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1),
torch.float32)
buf99 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_25[
grid(32)](buf95, primals_37, buf96, buf100, buf99, 32, 4096,
XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del primals_37
buf101 = extern_kernels.convolution(buf100, primals_38, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf102 = buf101
del buf101
triton_poi_fused_convolution_26[grid(16384)](buf102, primals_39,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_39
buf103 = extern_kernels.convolution(buf102, primals_40, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf103, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf109 = extern_kernels.convolution(primals_1, primals_42, stride=(
1, 1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf110 = buf109
del buf109
triton_poi_fused_convolution_26[grid(16384)](buf110, primals_43,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_43
buf111 = extern_kernels.convolution(buf110, primals_44, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf111, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf104 = buf103
del buf103
buf105 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf106 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf108 = reinterpret_tensor(buf106, (1, 4, 1, 1), (4, 1, 1, 1), 0)
del buf106
buf112 = buf111
del buf111
buf113 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf114 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf116 = reinterpret_tensor(buf114, (1, 4, 1, 1), (4, 1, 1, 1), 0)
del buf114
buf117 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.float32)
triton_red_fused__native_batch_norm_legit_add_convolution_27[grid(4)](
buf104, buf108, buf112, buf116, primals_41, primals_45, buf105,
buf113, buf117, 4, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
del primals_41
del primals_45
return (buf117, primals_1, primals_2, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, primals_32, primals_34, primals_36, primals_38,
primals_40, primals_42, primals_44, buf1, buf3, reinterpret_tensor(
buf7, (32,), (1,), 0), buf8, buf9, buf10, buf12, buf14,
reinterpret_tensor(buf18, (64,), (1,), 0), buf19, buf20, buf21,
buf23, buf25, reinterpret_tensor(buf29, (128,), (1,), 0), buf30,
buf31, buf32, buf34, buf36, reinterpret_tensor(buf40, (256,), (1,),
0), buf41, buf42, buf43, buf45, buf47, buf48, buf51, buf52, buf55,
buf57, buf59, buf60, buf63, buf64, buf67, buf69, buf71, buf72,
buf75, buf76, buf79, buf81, buf83, buf84, buf87, buf88, buf91,
buf93, buf95, reinterpret_tensor(buf99, (32,), (1,), 0), buf100,
buf102, buf104, buf105, buf108, buf110, buf112, buf113, buf116,
reinterpret_tensor(buf96, (1, 32, 1, 1), (32, 1, 1, 1), 0),
reinterpret_tensor(buf37, (1, 256, 1, 1), (256, 1, 1, 1), 0),
reinterpret_tensor(buf26, (1, 128, 1, 1), (128, 1, 1, 1), 0),
reinterpret_tensor(buf15, (1, 64, 1, 1), (64, 1, 1, 1), 0),
reinterpret_tensor(buf4, (1, 32, 1, 1), (32, 1, 1, 1), 0))
class BasicConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(BasicConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
padding=1)
self.norm = nn.InstanceNorm2d(out_channels)
def forward(self, x):
x = F.leaky_relu(self.norm(self.conv2(self.conv1(x))),
negative_slope=0.001, inplace=True)
return x
class BasicUNetNew(nn.Module):
def __init__(self, n_input_channel=1, n_output_channel=1, nic=8,
residual=True):
super().__init__()
self.residual = residual
self.down1 = nn.MaxPool2d(kernel_size=2)
self.down2 = nn.MaxPool2d(kernel_size=2)
self.down3 = nn.MaxPool2d(kernel_size=2)
self.down4 = nn.MaxPool2d(kernel_size=2)
self.up1 = lambda x: nn.functional.interpolate(x, mode='nearest',
scale_factor=2)
self.up2 = lambda x: nn.functional.interpolate(x, mode='nearest',
scale_factor=2)
self.up3 = lambda x: nn.functional.interpolate(x, mode='nearest',
scale_factor=2)
self.up4 = lambda x: nn.functional.interpolate(x, mode='nearest',
scale_factor=2)
self.conv1 = BasicConvBlock(n_input_channel, nic)
self.conv2 = BasicConvBlock(nic, nic * 2)
self.conv3 = BasicConvBlock(nic * 2, nic * 4)
self.conv4 = BasicConvBlock(nic * 4, nic * 8)
self.conv5 = BasicConvBlock(nic * 8, nic * 16)
self.conv6 = BasicConvBlock(nic * 3 * 8, nic * 8)
self.conv7 = BasicConvBlock(nic * 3 * 4, nic * 4)
self.conv8 = BasicConvBlock(nic * 3 * 2, nic * 2)
self.conv9 = BasicConvBlock(nic * 3, nic)
self.conv10 = BasicConvBlock(nic, n_output_channel)
if self.residual:
self.conv11 = BasicConvBlock(n_input_channel, n_output_channel)
def forward(self, input_0):
primals_2 = self.conv1.conv1.weight
primals_3 = self.conv1.conv1.bias
primals_4 = self.conv1.conv2.weight
primals_5 = self.conv1.conv2.bias
primals_6 = self.conv2.conv1.weight
primals_7 = self.conv2.conv1.bias
primals_8 = self.conv2.conv2.weight
primals_9 = self.conv2.conv2.bias
primals_10 = self.conv3.conv1.weight
primals_11 = self.conv3.conv1.bias
primals_12 = self.conv3.conv2.weight
primals_13 = self.conv3.conv2.bias
primals_14 = self.conv4.conv1.weight
primals_15 = self.conv4.conv1.bias
primals_16 = self.conv4.conv2.weight
primals_17 = self.conv4.conv2.bias
primals_18 = self.conv5.conv1.weight
primals_19 = self.conv5.conv1.bias
primals_20 = self.conv5.conv2.weight
primals_21 = self.conv5.conv2.bias
primals_22 = self.conv6.conv1.weight
primals_23 = self.conv6.conv1.bias
primals_24 = self.conv6.conv2.weight
primals_25 = self.conv6.conv2.bias
primals_26 = self.conv7.conv1.weight
primals_27 = self.conv7.conv1.bias
primals_28 = self.conv7.conv2.weight
primals_29 = self.conv7.conv2.bias
primals_30 = self.conv8.conv1.weight
primals_31 = self.conv8.conv1.bias
primals_32 = self.conv8.conv2.weight
primals_33 = self.conv8.conv2.bias
primals_34 = self.conv9.conv1.weight
primals_35 = self.conv9.conv1.bias
primals_36 = self.conv9.conv2.weight
primals_37 = self.conv9.conv2.bias
primals_38 = self.conv10.conv1.weight
primals_39 = self.conv10.conv1.bias
primals_40 = self.conv10.conv2.weight
primals_41 = self.conv10.conv2.bias
primals_42 = self.conv11.conv1.weight
primals_43 = self.conv11.conv1.bias
primals_44 = self.conv11.conv2.weight
primals_45 = self.conv11.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45])
return output[0]
| royerloic/aydin | BasicUNet | false | 16,420 | ["BSD-3-Clause"] | 78 | f9c61a24030891d008c318b250da5faec69fcd7d | https://github.com/royerloic/aydin/tree/f9c61a24030891d008c318b250da5faec69fcd7d |
HSwish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/4n/c4n4t4ob46tjofubvbx7kuypv4fkq77j7kjunchukdfb6wpttdx6.py
# Topologically Sorted Source Nodes: [add, relu6, div_, mul], Original ATen: [aten.add, aten.hardtanh, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# div_ => div
# mul => mul
# relu6 => clamp_max, clamp_min
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %div), kwargs = {})
triton_poi_fused_add_div_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tmp9 = tmp0 * tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, relu6, div_, mul], Original ATen: [aten.add, aten.hardtanh, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
class HSwish(nn.Module):
"""Hard Swish activation function.
See: https://arxiv.org/abs/1905.02244
"""
def forward(self, x):
return x * nn.functional.relu6(x + 3).div_(6)
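# Quick sanity check (illustrative, not part of the original module): hard
# swish is x * relu6(x + 3) / 6, so inputs <= -3 map to 0 and inputs >= 3
# pass through unchanged, e.g.
# HSwish()(torch.tensor([-3.0, 0.0, 3.0])) -> tensor([0., 0., 3.])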
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tmp9 = tmp0 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HSwishNew(nn.Module):
"""Hard Swish activation function.
See: https://arxiv.org/abs/1905.02244
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| shiyuann/determined | HSwish | false | 16,421 | ["Apache-2.0"] | 1,729 | 856123ae112759de7bded9bc7bd0e07055f2174b | https://github.com/shiyuann/determined/tree/856123ae112759de7bded9bc7bd0e07055f2174b |
StyledConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ri/criuvsdl3sferb4bb6ci5zaps3wys7xxcpybz7vfo2ba4q7cuq6c.py
# Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt]
# Source node to ATen node mapping:
# add => add
# demod => rsqrt
# mul_2 => mul_2
# pow_1 => pow_1
# sum_1 => sum_1
# weight => mul_3
# weight_1 => mul_4
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.125), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_3, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3, 4]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %view_1), kwargs = {})
triton_per_fused_add_mul_pow_rsqrt_sum_2 = async_compile.triton('triton_per_fused_add_mul_pow_rsqrt_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_rsqrt_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r5 = rindex
x0 = xindex % 4
r3 = (rindex // 16)
x1 = (xindex // 4)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r5 + (64*x0)), xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr1 + (r3 + (4*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp1 = 0.125
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 1e-08
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp4 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + (x4), tmp12, xmask)
tl.store(out_ptr0 + (r5 + (64*x4)), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/m5/cm56eqdvtyi73jnsik4k6g5v62qofgoqvpcasegx6ljukv6gdjyd.py
# Topologically Sorted Source Nodes: [mul_5, out_3, add_2, leaky_relu, out_4], Original ATen: [aten.mul, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# add_2 => add_2
# leaky_relu => gt, mul_6, where
# mul_5 => mul_5
# out_3 => add_1
# out_4 => mul_7
# Graph fragment:
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %normal_functional), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %mul_5), kwargs = {})
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %view_5), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_2, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.2), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_2, %mul_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1.4142135623730951), kwargs = {})
triton_poi_fused_add_leaky_relu_mul_3 = async_compile.triton('triton_poi_fused_add_leaky_relu_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 25
x2 = (xindex // 100)
x1 = (xindex // 25) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tl.load(in_ptr2 + (x0 + (25*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = 1.4142135623730951
tmp14 = tmp12 * tmp13
tl.store(out_ptr0 + (x3), tmp9, xmask)
tl.store(out_ptr1 + (x3), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_6, (1, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm]
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
buf3 = buf0; del buf0 # reuse
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt]
triton_per_fused_add_mul_pow_rsqrt_sum_2.run(buf4, primals_5, buf2, buf5, 16, 64, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1))
buf7 = empty_strided_cuda((4, 1, 5, 5), (25, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [noise], Original ATen: [aten.normal_functional]
buf8 = torch.ops.aten.normal_functional.default(buf7)
del buf7
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_5, out_3, add_2, leaky_relu, out_4], Original ATen: [aten.mul, aten.add, aten.leaky_relu]
triton_poi_fused_add_leaky_relu_mul_3.run(buf6, primals_6, buf9, primals_7, buf10, buf11, 400, grid=grid(400), stream=stream0)
del buf6
del primals_6
del primals_7
return (buf11, primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), buf9, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import os
import traceback
import torch
import warnings
import numpy as np
from torch import nn
from torch.nn import functional as F
import torch.utils.cpp_extension
# Module-level plugin state used by _init() and _upfirdn2d_cuda() below;
# `custom_ops` and `misc` are expected from the StyleGAN2-ADA torch_utils package.
_inited = False
_plugin = None
_upfirdn2d_cuda_cache = dict()
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    rest_dim = [1] * (input.ndim - bias.ndim - 1)
    return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim),
        negative_slope=negative_slope) * scale
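# The bias is reshaped to (1, C, 1, ..., 1) so it broadcasts over any trailing
# spatial dims, and the sqrt(2) default scale approximately restores unit
# variance after the leaky ReLU, as in StyleGAN2's fused bias-act.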
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
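# For example, make_kernel([1, 3, 3, 1]) takes the outer product of the 1-D
# tap vector with itself and normalizes by its sum (64), yielding the 4x4
# binomial blur filter used as the default blur_kernel below.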
def _init():
global _inited, _plugin
if not _inited:
sources = ['upfirdn2d.cpp', 'upfirdn2d.cu']
sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
try:
_plugin = custom_ops.get_plugin('upfirdn2d_plugin', sources=
sources, extra_cuda_cflags=['--use_fast_math'])
except:
            warnings.warn(
                'Failed to build CUDA kernels for upfirdn2d. Falling back '
                'to slow reference implementation. Details:\n\n' +
                traceback.format_exc())
return _plugin is not None
def _get_filter_size(f):
if f is None:
return 1, 1
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
fw = f.shape[-1]
fh = f.shape[0]
with misc.suppress_tracer_warnings():
fw = int(fw)
fh = int(fh)
misc.assert_shape(f, [fh, fw][:f.ndim])
assert fw >= 1 and fh >= 1
return fw, fh
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
"""Fast CUDA implementation of `upfirdn2d()` using custom ops.
"""
upx, upy = _parse_scaling(up)
downx, downy = _parse_scaling(down)
padx0, padx1, pady0, pady1 = _parse_padding(padding)
key = upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain
if key in _upfirdn2d_cuda_cache:
return _upfirdn2d_cuda_cache[key]
class Upfirdn2dCuda(torch.autograd.Function):
@staticmethod
def forward(ctx, x, f):
assert isinstance(x, torch.Tensor) and x.ndim == 4
if f is None:
f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
y = x
if f.ndim == 2:
y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0,
padx1, pady0, pady1, flip_filter, gain)
else:
y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1,
padx0, padx1, 0, 0, flip_filter, np.sqrt(gain))
y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy,
0, 0, pady0, pady1, flip_filter, np.sqrt(gain))
ctx.save_for_backward(f)
ctx.x_shape = x.shape
return y
@staticmethod
def backward(ctx, dy):
f, = ctx.saved_tensors
_, _, ih, iw = ctx.x_shape
_, _, oh, ow = dy.shape
fw, fh = _get_filter_size(f)
p = [fw - padx0 - 1, iw * upx - ow * downx + padx0 - upx + 1,
fh - pady0 - 1, ih * upy - oh * downy + pady0 - upy + 1]
dx = None
df = None
if ctx.needs_input_grad[0]:
dx = _upfirdn2d_cuda(up=down, down=up, padding=p,
flip_filter=not flip_filter, gain=gain).apply(dy, f)
assert not ctx.needs_input_grad[1]
return dx, df
_upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
return Upfirdn2dCuda
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1,
impl='cuda'):
"""Pad, upsample, filter, and downsample a batch of 2D images.
Performs the following sequence of operations for each channel:
1. Upsample the image by inserting N-1 zeros after each pixel (`up`).
2. Pad the image with the specified number of zeros on each side (`padding`).
Negative padding corresponds to cropping the image.
3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it
so that the footprint of all output pixels lies within the input image.
4. Downsample the image by keeping every Nth pixel (`down`).
This sequence of operations bears close resemblance to scipy.signal.upfirdn().
The fused op is considerably more efficient than performing the same calculation
using standard PyTorch ops. It supports gradients of arbitrary order.
Args:
x: Float32/float64/float16 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
f: Float32 FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
up: Integer upsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
down: Integer downsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
padding: Padding with respect to the upsampled image. Can be a single number
or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
flip_filter: False = convolution, True = correlation (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
assert isinstance(x, torch.Tensor)
assert impl in ['ref', 'cuda']
if impl == 'cuda' and x.device.type == 'cuda' and _init():
return _upfirdn2d_cuda(up=up, down=down, padding=padding,
flip_filter=flip_filter, gain=gain).apply(x, f)
return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding,
flip_filter=flip_filter, gain=gain)
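# Minimal pure-PyTorch sketch of the `_upfirdn2d_ref` fallback referenced
# above, following the upsample/pad/filter/downsample steps from the
# docstring. This is an assumption-filled reference (plain F.conv2d instead
# of a gradfix wrapper), not the tuned CUDA path.
def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
    assert isinstance(x, torch.Tensor) and x.ndim == 4
    if f is None:
        f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
    batch_size, num_channels, in_height, in_width = x.shape
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)
    # 1. Upsample by inserting upx-1 / upy-1 zeros after each pixel.
    x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
    x = F.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
    x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
    # 2. Pad with zeros; negative padding crops.
    x = F.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])
    x = x[:, :, max(-pady0, 0):x.shape[2] - max(-pady1, 0), max(-padx0, 0):
        x.shape[3] - max(-padx1, 0)]
    # 3. Convolve with the (possibly separable) FIR filter via grouped conv.
    f = (f * gain ** (f.ndim / 2)).to(x.dtype)
    if not flip_filter:
        f = f.flip(list(range(f.ndim)))
    f = f[None, None].repeat([num_channels, 1] + [1] * f.ndim)
    if f.ndim == 4:
        x = F.conv2d(input=x, weight=f, groups=num_channels)
    else:
        x = F.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)
        x = F.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)
    # 4. Downsample by keeping every downx-th / downy-th pixel.
    return x[:, :, ::downy, ::downx]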
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
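# EqualLinear stores weights at std 1/lr_mul and rescales them by
# 1/sqrt(in_dim) * lr_mul on every forward pass: the equalized learning rate
# trick from Progressive GAN (https://arxiv.org/abs/1710.10196).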
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
    def forward(self, input):
        # upfirdn2d() above takes `padding`, not `pad`; expand the (before,
        # after) pair to both axes to keep the original semantics.
        pad0, pad1 = self.pad
        out = upfirdn2d(input, self.kernel, padding=[pad0, pad1, pad0, pad1])
        return out
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
return fused_leaky_relu(input, self.bias, self.negative_slope, self
.scale)
class StyledConv(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True):
super().__init__()
self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size,
style_dim, upsample=upsample, blur_kernel=blur_kernel,
demodulate=demodulate)
self.noise = NoiseInjection()
self.activate = FusedLeakyReLU(out_channel)
def forward(self, input, style, noise=None):
out = self.conv(input, style)
out = self.noise(out, noise=noise)
out = self.activate(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4,
'style_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import os
import traceback
import warnings
import numpy as np
from torch import nn
from torch.nn import functional as F
import torch.utils.cpp_extension
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r5 = rindex
x0 = xindex % 4
r3 = rindex // 16
x1 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = 0.125
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 1e-08
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp4 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + x4, tmp12, xmask)
tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask)
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 25
x2 = xindex // 100
x1 = xindex // 25 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tl.load(in_ptr2 + (x0 + 25 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = 1.4142135623730951
tmp14 = tmp12 * tmp13
tl.store(out_ptr0 + x3, tmp9, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
buf3 = buf0
del buf0
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5,
buf2, buf5, 16, 64, XBLOCK=8, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4,
4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1))
buf7 = empty_strided_cuda((4, 1, 5, 5), (25, 25, 5, 1), torch.float32)
buf8 = torch.ops.aten.normal_functional.default(buf7)
del buf7
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32
)
triton_poi_fused_add_leaky_relu_mul_3[grid(400)](buf6, primals_6,
buf9, primals_7, buf10, buf11, 400, XBLOCK=256, num_warps=4,
num_stages=1)
del buf6
del primals_6
del primals_7
return buf11, primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5,
(16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), buf9, buf10
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    # View the per-channel bias as (1, C, 1, ..., 1) so it broadcasts over
    # the trailing spatial dims; the sqrt(2) scale keeps the activation
    # magnitude roughly constant.
    rest_dim = [1] * (input.ndim - bias.ndim - 1)
    return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim),
        negative_slope=negative_slope) * scale
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
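# Example (illustrative): make_kernel([1, 3, 3, 1]) builds the separable 4x4
# blur kernel [1, 3, 3, 1]^T [1, 3, 3, 1] / 64 that Blur below registers.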
# Module-level state assumed by _init() and _upfirdn2d_cuda() below (missing
# from this extract of the original file).
_inited = False
_plugin = None
_upfirdn2d_cuda_cache = dict()
def _init():
    global _inited, _plugin
    if not _inited:
        sources = ['upfirdn2d.cpp', 'upfirdn2d.cu']
        sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
        try:
            # `custom_ops` is provided by the surrounding StyleGAN2 codebase
            # and is not included in this extract.
            _plugin = custom_ops.get_plugin('upfirdn2d_plugin', sources=
                sources, extra_cuda_cflags=['--use_fast_math'])
        except Exception:
            warnings.warn(
                'Failed to build CUDA kernels for upfirdn2d. Falling back '
                'to slow reference implementation. Details:\n' +
                traceback.format_exc())
    return _plugin is not None
def _get_filter_size(f):
if f is None:
return 1, 1
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
fw = f.shape[-1]
fh = f.shape[0]
    # `misc` is a helper module from the surrounding StyleGAN2 codebase and
    # is not included in this extract.
    with misc.suppress_tracer_warnings():
fw = int(fw)
fh = int(fh)
misc.assert_shape(f, [fh, fw][:f.ndim])
assert fw >= 1 and fh >= 1
return fw, fh
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
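# Illustrative examples of the padding conventions parsed above:
#   _parse_padding(1)            -> (1, 1, 1, 1)
#   _parse_padding([2, 3])       -> (2, 2, 3, 3)   # [x, y]
#   _parse_padding([1, 2, 3, 4]) -> (1, 2, 3, 4)   # [x0, x1, y0, y1]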
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
"""Fast CUDA implementation of `upfirdn2d()` using custom ops.
"""
upx, upy = _parse_scaling(up)
downx, downy = _parse_scaling(down)
padx0, padx1, pady0, pady1 = _parse_padding(padding)
key = upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain
if key in _upfirdn2d_cuda_cache:
return _upfirdn2d_cuda_cache[key]
class Upfirdn2dCuda(torch.autograd.Function):
@staticmethod
def forward(ctx, x, f):
assert isinstance(x, torch.Tensor) and x.ndim == 4
if f is None:
f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
y = x
if f.ndim == 2:
y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0,
padx1, pady0, pady1, flip_filter, gain)
else:
y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1,
padx0, padx1, 0, 0, flip_filter, np.sqrt(gain))
y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy,
0, 0, pady0, pady1, flip_filter, np.sqrt(gain))
ctx.save_for_backward(f)
ctx.x_shape = x.shape
return y
@staticmethod
def backward(ctx, dy):
f, = ctx.saved_tensors
_, _, ih, iw = ctx.x_shape
_, _, oh, ow = dy.shape
fw, fh = _get_filter_size(f)
p = [fw - padx0 - 1, iw * upx - ow * downx + padx0 - upx + 1,
fh - pady0 - 1, ih * upy - oh * downy + pady0 - upy + 1]
dx = None
df = None
if ctx.needs_input_grad[0]:
dx = _upfirdn2d_cuda(up=down, down=up, padding=p,
flip_filter=not flip_filter, gain=gain).apply(dy, f)
assert not ctx.needs_input_grad[1]
return dx, df
_upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
return Upfirdn2dCuda
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1,
impl='cuda'):
"""Pad, upsample, filter, and downsample a batch of 2D images.
Performs the following sequence of operations for each channel:
1. Upsample the image by inserting N-1 zeros after each pixel (`up`).
2. Pad the image with the specified number of zeros on each side (`padding`).
Negative padding corresponds to cropping the image.
3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it
so that the footprint of all output pixels lies within the input image.
4. Downsample the image by keeping every Nth pixel (`down`).
This sequence of operations bears close resemblance to scipy.signal.upfirdn().
The fused op is considerably more efficient than performing the same calculation
using standard PyTorch ops. It supports gradients of arbitrary order.
Args:
x: Float32/float64/float16 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
f: Float32 FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
up: Integer upsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
down: Integer downsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
padding: Padding with respect to the upsampled image. Can be a single number
or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
flip_filter: False = convolution, True = correlation (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
assert isinstance(x, torch.Tensor)
assert impl in ['ref', 'cuda']
if impl == 'cuda' and x.device.type == 'cuda' and _init():
return _upfirdn2d_cuda(up=up, down=down, padding=padding,
flip_filter=flip_filter, gain=gain).apply(x, f)
return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding,
flip_filter=flip_filter, gain=gain)
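# `_upfirdn2d_ref` is referenced above but missing from this extract. The
# following is a reconstructed sketch that implements the four steps
# documented in the `upfirdn2d` docstring (zero-insertion upsample, pad/crop,
# FIR filter, downsample); treat it as illustrative rather than the original
# implementation.
def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
    assert isinstance(x, torch.Tensor) and x.ndim == 4
    if f is None:
        f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
    batch_size, num_channels, in_height, in_width = x.shape
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)
    # 1. Upsample: insert upx-1 / upy-1 zeros after each pixel.
    x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
    x = F.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
    x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
    # 2. Pad or crop (negative padding crops).
    x = F.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])
    x = x[:, :, max(-pady0, 0):x.shape[2] - max(-pady1, 0), max(-padx0, 0):
        x.shape[3] - max(-padx1, 0)]
    # 3. Convolve with the filter; a 1D filter is applied separably.
    f = f * gain ** (f.ndim / 2)
    f = f.to(x.dtype)
    if not flip_filter:
        f = f.flip(list(range(f.ndim)))
    f = f[None, None].repeat([num_channels, 1] + [1] * f.ndim)
    if f.ndim == 4:
        x = F.conv2d(input=x, weight=f, groups=num_channels)
    else:
        x = F.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)
        x = F.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)
    # 4. Downsample: keep every downy-th row and downx-th column.
    x = x[:, :, ::downy, ::downx]
    return x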
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
    def forward(self, input):
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)
        else:
            bias = (self.bias * self.lr_mul if self.bias is not None else None)
            out = F.linear(input, self.weight * self.scale, bias=bias)
        return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
    def forward(self, input):
        # `self.pad` is a (pad0, pad1) pair applied to both axes; the
        # `upfirdn2d` defined above takes a 4-element `padding` argument,
        # so expand it to [x_before, x_after, y_before, y_after].
        out = upfirdn2d(input, self.kernel, padding=[self.pad[0], self.
            pad[1], self.pad[0], self.pad[1]])
        return out
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
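def _modulated_conv2d_equivalence_sketch():
    # Hedged sanity sketch (illustrative, not from the repo): the grouped
    # convolution in ModulatedConv2d.forward is equivalent to convolving each
    # sample with its own modulated (and demodulated) weights.
    conv = ModulatedConv2d(4, 4, 3, style_dim=4)
    x = torch.randn(2, 4, 8, 8)
    style = torch.randn(2, 4)
    out = conv(x, style)
    s = conv.modulation(style).view(2, 1, 4, 1, 1)
    w = conv.scale * conv.weight * s
    demod = torch.rsqrt(w.pow(2).sum([2, 3, 4]) + 1e-08)
    w = w * demod.view(2, 4, 1, 1, 1)
    ref = torch.stack([F.conv2d(x[b:b + 1], w[b], padding=conv.padding)[0]
        for b in range(2)])
    assert torch.allclose(out, ref, atol=1e-05)
    return out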
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
return fused_leaky_relu(input, self.bias, self.negative_slope, self
.scale)
class StyledConvNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True):
super().__init__()
self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size,
style_dim, upsample=upsample, blur_kernel=blur_kernel,
demodulate=demodulate)
self.noise = NoiseInjection()
self.activate = FusedLeakyReLU(out_channel)
def forward(self, input_0, input_1):
primals_5 = self.conv.weight
primals_2 = self.conv.modulation.weight
primals_3 = self.conv.modulation.bias
primals_6 = self.noise.weight
primals_7 = self.activate.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
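# Hedged usage sketch (illustrative): StyledConvNew mirrors StyledConv's
# (input, style) forward; the compiled path always samples fresh noise, i.e.
# it corresponds to calling StyledConv with noise=None. For example:
#   m = StyledConvNew(4, 4, 4, 4).cuda()
#   y = m(torch.rand(4, 4, 4, 4, device='cuda'), torch.rand(4, 4, device='cuda'))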
| phygitalism/PTI | StyledConv | false | 16,422 | [
"MIT"
] | 345 | adab2eb1d0e36ac5714e663e1fec9f85a0d51fbf | https://github.com/phygitalism/PTI/tree/adab2eb1d0e36ac5714e663e1fec9f85a0d51fbf |
AlphaEntropy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qk/cqk6uygkayz5rfjeyvroasluypdlp4rmaq2oooyljgtf6w22lklj.py
# Topologically Sorted Source Nodes: [mul, sum_1, mean], Original ATen: [aten.mul, aten.sum, aten.mean]
# Source node to ATen node mapping:
# mean => mean_1
# mul => mul
# sum_1 => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %arg3_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
triton_per_fused_mean_mul_sum_0 = async_compile.triton('triton_per_fused_mean_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp4 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp7 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp8 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp11 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp12 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.sum(tmp15, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/sy/csy5yghzybuog6gicj2rbi7yvelyylsnrsi3dumfcpwmxbd4e3be.py
# Topologically Sorted Source Nodes: [mul, sum_1, mean, p_loss, v_loss, add], Original ATen: [aten.mul, aten.sum, aten.mean, aten.neg, aten.mse_loss, aten.add]
# Source node to ATen node mapping:
# add => add
# mean => mean_1
# mul => mul
# p_loss => neg
# sum_1 => sum_1
# v_loss => mean, pow_1, sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %arg3_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %mean), kwargs = {})
triton_per_fused_add_mean_mse_loss_mul_neg_sum_1 = async_compile.triton('triton_per_fused_add_mean_mse_loss_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mse_loss_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mse_loss_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp7 = tl.load(in_out_ptr0 + (0))
tmp8 = tl.broadcast_to(tmp7, [1])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp9 = 64.0
tmp10 = tmp8 / tmp9
tmp11 = -tmp10
tmp12 = 256.0
tmp13 = tmp6 / tmp12
tmp14 = tmp11 + tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp14, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [mul, sum_1, mean], Original ATen: [aten.mul, aten.sum, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_mul_sum_0.run(arg2_1, arg3_1, buf0, 1, 64, grid=grid(1), stream=stream0)
del arg2_1
del arg3_1
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, sum_1, mean, p_loss, v_loss, add], Original ATen: [aten.mul, aten.sum, aten.mean, aten.neg, aten.mse_loss, aten.add]
triton_per_fused_add_mean_mse_loss_mul_neg_sum_1.run(buf2, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AlphaEntropy(nn.Module):
def __init__(self):
super().__init__()
self.v_loss = nn.MSELoss()
def forward(self, props, v, pi, reward):
v_loss = self.v_loss(v, reward)
p_loss = -torch.mean(torch.sum(props * pi, 1))
return p_loss + v_loss
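def _alpha_entropy_example():
    # Hedged sanity sketch (illustrative): the loss is
    #   MSE(v, reward) - mean_b( sum_dim1( props * pi ) ),
    # which is exactly what the fused Triton kernels in the compiled
    # version below compute.
    loss_fn = AlphaEntropy()
    props, v, pi, reward = (torch.rand(4, 4, 4, 4) for _ in range(4))
    loss = loss_fn(props, v, pi, reward)
    expected = nn.functional.mse_loss(v, reward) - torch.mean(torch.sum(
        props * pi, 1))
    assert torch.allclose(loss, expected)
    return loss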
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp7 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp11 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp12 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.sum(tmp15, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)
@triton.jit
def triton_per_fused_add_mean_mse_loss_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp7 = tl.load(in_out_ptr0 + 0)
tmp8 = tl.broadcast_to(tmp7, [1])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp9 = 64.0
tmp10 = tmp8 / tmp9
tmp11 = -tmp10
tmp12 = 256.0
tmp13 = tmp6 / tmp12
tmp14 = tmp11 + tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_mul_sum_0[grid(1)](arg2_1, arg3_1, buf0, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg2_1
del arg3_1
buf2 = buf0
del buf0
triton_per_fused_add_mean_mse_loss_mul_neg_sum_1[grid(1)](buf2,
arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class AlphaEntropyNew(nn.Module):
def __init__(self):
super().__init__()
self.v_loss = nn.MSELoss()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| shinoyuki222/torch-light | AlphaEntropy | false | 16,423 | [
"MIT"
] | 310 | 4799805d9bcae82a9f12a574dcf9fdd838c92ee9 | https://github.com/shinoyuki222/torch-light/tree/4799805d9bcae82a9f12a574dcf9fdd838c92ee9 |
MLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/dd/cddsugzjkfr45ivhqpnzfedr6lc5ptacwpecx6j7l2rmsfffadxm.py
# Topologically Sorted Source Nodes: [add, x_1, x_2], Original ATen: [aten.add, aten.relu]
# Source node to ATen node mapping:
# add => add
# x_1 => add_1
# x_2 => relu
# Graph fragment:
# %add_tensor_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_4, %primals_3), kwargs = {})
# %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_tensor_4, %add_tensor_3), kwargs = {})
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_7), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %add_tensor_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_poi_fused_add_relu_0 = async_compile.triton('triton_poi_fused_add_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), None)
tmp4 = tl.load(in_ptr2 + (x0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x2), None)
tmp8 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tl.store(in_out_ptr0 + (x2), tmp12, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f3/cf3aanfqkd3b3pvexih2slyq6527crsaqnmvr3rq6tpbn2nyl74i.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_1
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 1024), (3072, 1024, 1))
assert_size_stride(primals_2, (1024, 1024), (1024, 1))
assert_size_stride(primals_3, (1024, ), (1, ))
assert_size_stride(primals_4, (1024, 1024), (1024, 1))
assert_size_stride(primals_5, (1024, ), (1, ))
assert_size_stride(primals_6, (1024, 1024), (1024, 1))
assert_size_stride(primals_7, (1024, ), (1, ))
assert_size_stride(primals_8, (1024, 1024), (1024, 1))
assert_size_stride(primals_9, (1024, ), (1, ))
assert_size_stride(primals_10, (1024, 1024), (1024, 1))
assert_size_stride(primals_11, (1024, ), (1, ))
assert_size_stride(primals_12, (10, 1024), (1024, 1))
assert_size_stride(primals_13, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 1024), (3072, 1), 0), reinterpret_tensor(primals_2, (1024, 1024), (1, 1024), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 1024), (3072, 1), 1024), reinterpret_tensor(primals_4, (1024, 1024), (1, 1024), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 1024), (3072, 1), 2048), reinterpret_tensor(primals_6, (1024, 1024), (1, 1024), 0), out=buf2)
del primals_6
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add, x_1, x_2], Original ATen: [aten.add, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_add_relu_0.run(buf3, primals_3, buf1, primals_5, buf2, primals_7, 4096, grid=grid(4096), stream=stream0)
del primals_3
del primals_5
del primals_7
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (1024, 1024), (1, 1024), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf5, primals_9, 4096, grid=grid(4096), stream=stream0)
del primals_9
buf6 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_10, (1024, 1024), (1, 1024), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf7, primals_11, 4096, grid=grid(4096), stream=stream0)
del primals_11
buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, buf7, reinterpret_tensor(primals_12, (1024, 10), (1, 1024), 0), alpha=1, beta=1, out=buf8)
del primals_13
return (buf8, reinterpret_tensor(primals_1, (4, 1024), (3072, 1), 0), reinterpret_tensor(primals_1, (4, 1024), (3072, 1), 1024), reinterpret_tensor(primals_1, (4, 1024), (3072, 1), 2048), buf3, buf5, buf7, primals_12, primals_10, primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 1024), (3072, 1024, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1024, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1024, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1024, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1024, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1024, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((10, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
import torch.nn.functional as F
import torch.cuda
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class Butterfly(nn.Module):
"""Product of log N butterfly factors, each is a block 2x2 of diagonal matrices.
Compatible with torch.nn.Linear.
Parameters:
in_size: size of input
out_size: size of output
bias: If set to False, the layer will not learn an additive bias.
Default: ``True``
complex: whether complex or real
increasing_stride: whether the first butterfly block will multiply with increasing stride
(e.g. 1, 2, ..., n/2) or decreasing stride (e.g., n/2, n/4, ..., 1).
init: 'randn', 'ortho', or 'identity'. Whether the weight matrix should be initialized to
from randn twiddle, or to be randomly orthogonal/unitary, or to be the identity matrix.
nblocks: number of B or B^T blocks. The B and B^T will alternate.
"""
def __init__(self, in_size, out_size, bias=True, complex=False,
increasing_stride=True, init='randn', nblocks=1):
super().__init__()
self.in_size = in_size
log_n = int(math.ceil(math.log2(in_size)))
self.log_n = log_n
size = self.in_size_extended = 1 << log_n
self.out_size = out_size
self.nstacks = int(math.ceil(out_size / self.in_size_extended))
self.complex = complex
self.increasing_stride = increasing_stride
assert nblocks >= 1
self.nblocks = nblocks
dtype = torch.get_default_dtype(
) if not self.complex else real_dtype_to_complex[torch.
get_default_dtype()]
twiddle_shape = self.nstacks, nblocks, log_n, size // 2, 2, 2
assert init in ['randn', 'ortho', 'identity']
self.init = init
self.twiddle = nn.Parameter(torch.empty(twiddle_shape, dtype=dtype))
if bias:
self.bias = nn.Parameter(torch.empty(out_size, dtype=dtype))
else:
self.register_parameter('bias', None)
self.twiddle._is_structured = True
if self.complex:
self.twiddle = nn.Parameter(view_as_real(self.twiddle))
if self.bias is not None:
self.bias = nn.Parameter(view_as_real(self.bias))
self.reset_parameters()
def reset_parameters(self):
"""Initialize bias the same way as torch.nn.Linear."""
twiddle = self.twiddle if not self.complex else view_as_complex(self
.twiddle)
if self.init == 'randn':
scaling = 1.0 / math.sqrt(2)
with torch.no_grad():
twiddle.copy_(torch.randn(twiddle.shape, dtype=twiddle.
dtype) * scaling)
elif self.init == 'ortho':
twiddle_core_shape = twiddle.shape[:-2]
if not self.complex:
theta = torch.rand(twiddle_core_shape) * math.pi * 2
c, s = torch.cos(theta), torch.sin(theta)
det = torch.randint(0, 2, twiddle_core_shape, dtype=c.dtype
) * 2 - 1
with torch.no_grad():
twiddle.copy_(torch.stack((torch.stack((det * c, -det *
s), dim=-1), torch.stack((s, c), dim=-1)), dim=-2))
else:
phi = torch.asin(torch.sqrt(torch.rand(twiddle_core_shape)))
c, s = torch.cos(phi), torch.sin(phi)
alpha, psi, chi = torch.rand((3,) + twiddle_core_shape
) * math.pi * 2
A = torch.exp(1.0j * (alpha + psi)) * c
B = torch.exp(1.0j * (alpha + chi)) * s
C = -torch.exp(1.0j * (alpha - chi)) * s
D = torch.exp(1.0j * (alpha - psi)) * c
with torch.no_grad():
twiddle.copy_(torch.stack((torch.stack((A, B), dim=-1),
torch.stack((C, D), dim=-1)), dim=-2))
elif self.init == 'identity':
twiddle_new = torch.eye(2, dtype=twiddle.dtype).reshape(1, 1, 1,
1, 2, 2)
twiddle_new = twiddle_new.expand(*twiddle.shape).contiguous()
with torch.no_grad():
twiddle.copy_(twiddle_new)
if self.bias is not None:
bound = 1 / math.sqrt(self.in_size)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input, transpose=False, conjugate=False, subtwiddle=False
):
"""
Parameters:
input: (batch, *, in_size)
transpose: whether the butterfly matrix should be transposed.
conjugate: whether the butterfly matrix should be conjugated.
subtwiddle: allow using only part of the parameters for smaller input.
Could be useful for weight sharing.
out_size is set to self.nstacks * self.in_size_extended in this case
Return:
output: (batch, *, out_size)
"""
twiddle = self.twiddle if not self.complex else view_as_complex(self
.twiddle)
if not subtwiddle:
output = self.pre_process(input)
else:
log_n = int(math.ceil(math.log2(input.size(-1))))
n = 1 << log_n
output = self.pre_process(input, padded_size=n)
twiddle = twiddle[:, :, :log_n, :n // 2
] if self.increasing_stride else twiddle[:, :, -log_n:, :n // 2
]
if conjugate and self.complex:
twiddle = twiddle.conj()
if not transpose:
output = butterfly_multiply(twiddle, output, self.increasing_stride
)
else:
twiddle = twiddle.transpose(-1, -2).flip([1, 2])
last_increasing_stride = self.increasing_stride != ((self.
nblocks - 1) % 2 == 1)
output = butterfly_multiply(twiddle, output, not
last_increasing_stride)
if not subtwiddle:
return self.post_process(input, output)
else:
return self.post_process(input, output, out_size=output.size(-1))
def pre_process(self, input, padded_size=None):
if padded_size is None:
padded_size = self.in_size_extended
input_size = input.size(-1)
output = input.reshape(-1, input_size)
batch = output.shape[0]
if input_size != padded_size:
output = F.pad(output, (0, padded_size - input_size))
output = output.unsqueeze(1).expand(batch, self.nstacks, padded_size)
return output
def post_process(self, input, output, out_size=None):
if out_size is None:
out_size = self.out_size
batch = output.shape[0]
output = output.view(batch, self.nstacks * output.size(-1))
out_size_extended = 1 << int(math.ceil(math.log2(output.size(-1))))
if out_size != out_size_extended:
output = output[:, :out_size]
if self.bias is not None:
bias = self.bias if not self.complex else view_as_complex(self.bias
)
output = output + bias[:out_size]
return output.view(*input.size()[:-1], out_size)
def extra_repr(self):
s = (
'in_size={}, out_size={}, bias={}, complex={}, increasing_stride={}, init={}, nblocks={}'
.format(self.in_size, self.out_size, self.bias is not None,
self.complex, self.increasing_stride, self.init, self.nblocks))
return s
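# Hedged sketch (illustrative; the repo's actual `butterfly_multiply` kernel
# is not included in this extract, and its memory layout for the n/2 pair
# axis may differ). For a single stack and block, a twiddle of shape
# (log_n, n // 2, 2, 2) is applied as log_n sparse factors: factor k pairs
# entries at stride 2**k (increasing stride) and multiplies each pair by its
# own trainable 2x2 block, giving O(n log n) parameters for a dense n x n map.
def butterfly_multiply_sketch(twiddle_k, x, increasing_stride=True):
    batch, n = x.shape
    log_n = twiddle_k.shape[0]
    out = x
    for k in range(log_n):
        stride = 1 << k if increasing_stride else 1 << (log_n - 1 - k)
        # Pair entries (g*2s + j, g*2s + s + j) for j < s, one 2x2 block each.
        y = out.reshape(batch, n // (2 * stride), 2, stride)
        t = twiddle_k[k].reshape(n // (2 * stride), stride, 2, 2).permute(
            0, 2, 3, 1)
        u = t[:, 0, 0] * y[:, :, 0] + t[:, 0, 1] * y[:, :, 1]
        v = t[:, 1, 0] * y[:, :, 0] + t[:, 1, 1] * y[:, :, 1]
        out = torch.stack([u, v], dim=2).reshape(batch, n)
    return out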
class MLP(nn.Module):
def __init__(self, method='linear', **kwargs):
super().__init__()
if method == 'linear':
def make_layer(name):
return self.add_module(name, nn.Linear(1024, 1024, bias=True))
elif method == 'butterfly':
def make_layer(name):
return self.add_module(name, Butterfly(1024, 1024, bias=
True, **kwargs))
elif method == 'low-rank':
def make_layer(name):
return self.add_module(name, nn.Sequential(nn.Linear(1024,
kwargs['rank'], bias=False), nn.Linear(kwargs['rank'],
1024, bias=True)))
elif method == 'toeplitz':
def make_layer(name):
return self.add_module(name, sl.ToeplitzLikeC(layer_size=
1024, bias=True, **kwargs))
else:
assert False, f'method {method} not supported'
make_layer('fc10')
make_layer('fc11')
make_layer('fc12')
make_layer('fc2')
make_layer('fc3')
self.logits = nn.Linear(1024, 10)
def forward(self, x):
x = x.view(-1, 3, 1024)
x = self.fc10(x[:, 0, :]) + self.fc11(x[:, 1, :]) + self.fc12(x[:,
2, :])
x = F.relu(x)
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.logits(x)
return x
def get_inputs():
return [torch.rand([4, 3, 1024])]
def get_init_inputs():
return [[], {}]
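def _mlp_usage_example():
    # Hedged usage sketch (not part of the original repo): the 'low-rank'
    # variant factors each 1024x1024 layer through a rank-r bottleneck via
    # kwargs['rank']; the input is three 1024-dim planes per sample.
    model = MLP(method='low-rank', rank=64)
    x = torch.rand(4, 3, 1024)
    logits = model(x)
    assert logits.shape == (4, 10)
    return logits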
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
from torch import nn
import torch.nn.functional as F
import torch.cuda
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, None)
tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x2, None)
tmp8 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tl.store(in_out_ptr0 + x2, tmp12, None)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 1024), (3072, 1024, 1))
assert_size_stride(primals_2, (1024, 1024), (1024, 1))
assert_size_stride(primals_3, (1024,), (1,))
assert_size_stride(primals_4, (1024, 1024), (1024, 1))
assert_size_stride(primals_5, (1024,), (1,))
assert_size_stride(primals_6, (1024, 1024), (1024, 1))
assert_size_stride(primals_7, (1024,), (1,))
assert_size_stride(primals_8, (1024, 1024), (1024, 1))
assert_size_stride(primals_9, (1024,), (1,))
assert_size_stride(primals_10, (1024, 1024), (1024, 1))
assert_size_stride(primals_11, (1024,), (1,))
assert_size_stride(primals_12, (10, 1024), (1024, 1))
assert_size_stride(primals_13, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 1024), (3072, 1
), 0), reinterpret_tensor(primals_2, (1024, 1024), (1, 1024), 0
), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 1024), (3072, 1
), 1024), reinterpret_tensor(primals_4, (1024, 1024), (1, 1024),
0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 1024), (3072, 1
), 2048), reinterpret_tensor(primals_6, (1024, 1024), (1, 1024),
0), out=buf2)
del primals_6
buf3 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_relu_0[grid(4096)](buf3, primals_3, buf1,
primals_5, buf2, primals_7, 4096, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_3
del primals_5
del primals_7
buf4 = buf2
del buf2
extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (1024, 1024),
(1, 1024), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_relu_1[grid(4096)](buf5, primals_9, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf6 = buf1
del buf1
extern_kernels.mm(buf5, reinterpret_tensor(primals_10, (1024, 1024),
(1, 1024), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_relu_1[grid(4096)](buf7, primals_11, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_11
buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_13, buf7, reinterpret_tensor(
primals_12, (1024, 10), (1, 1024), 0), alpha=1, beta=1, out=buf8)
del primals_13
return buf8, reinterpret_tensor(primals_1, (4, 1024), (3072, 1), 0
), reinterpret_tensor(primals_1, (4, 1024), (3072, 1), 1024
), reinterpret_tensor(primals_1, (4, 1024), (3072, 1), 2048
), buf3, buf5, buf7, primals_12, primals_10, primals_8
class Butterfly(nn.Module):
"""Product of log N butterfly factors, each is a block 2x2 of diagonal matrices.
Compatible with torch.nn.Linear.
Parameters:
in_size: size of input
out_size: size of output
bias: If set to False, the layer will not learn an additive bias.
Default: ``True``
complex: whether complex or real
increasing_stride: whether the first butterfly block will multiply with increasing stride
(e.g. 1, 2, ..., n/2) or decreasing stride (e.g., n/2, n/4, ..., 1).
init: 'randn', 'ortho', or 'identity'. Whether the weight matrix should be initialized
from randn twiddle, to be randomly orthogonal/unitary, or to be the identity matrix.
nblocks: number of B or B^T blocks. The B and B^T will alternate.
"""
def __init__(self, in_size, out_size, bias=True, complex=False,
increasing_stride=True, init='randn', nblocks=1):
super().__init__()
self.in_size = in_size
log_n = int(math.ceil(math.log2(in_size)))
self.log_n = log_n
size = self.in_size_extended = 1 << log_n
self.out_size = out_size
self.nstacks = int(math.ceil(out_size / self.in_size_extended))
self.complex = complex
self.increasing_stride = increasing_stride
assert nblocks >= 1
self.nblocks = nblocks
dtype = torch.get_default_dtype(
) if not self.complex else real_dtype_to_complex[torch.
get_default_dtype()]
twiddle_shape = self.nstacks, nblocks, log_n, size // 2, 2, 2
assert init in ['randn', 'ortho', 'identity']
self.init = init
self.twiddle = nn.Parameter(torch.empty(twiddle_shape, dtype=dtype))
if bias:
self.bias = nn.Parameter(torch.empty(out_size, dtype=dtype))
else:
self.register_parameter('bias', None)
self.twiddle._is_structured = True
if self.complex:
self.twiddle = nn.Parameter(view_as_real(self.twiddle))
if self.bias is not None:
self.bias = nn.Parameter(view_as_real(self.bias))
self.reset_parameters()
def reset_parameters(self):
"""Initialize bias the same way as torch.nn.Linear."""
twiddle = self.twiddle if not self.complex else view_as_complex(self
.twiddle)
if self.init == 'randn':
scaling = 1.0 / math.sqrt(2)
with torch.no_grad():
twiddle.copy_(torch.randn(twiddle.shape, dtype=twiddle.
dtype) * scaling)
elif self.init == 'ortho':
twiddle_core_shape = twiddle.shape[:-2]
if not self.complex:
theta = torch.rand(twiddle_core_shape) * math.pi * 2
c, s = torch.cos(theta), torch.sin(theta)
det = torch.randint(0, 2, twiddle_core_shape, dtype=c.dtype
) * 2 - 1
with torch.no_grad():
twiddle.copy_(torch.stack((torch.stack((det * c, -det *
s), dim=-1), torch.stack((s, c), dim=-1)), dim=-2))
else:
phi = torch.asin(torch.sqrt(torch.rand(twiddle_core_shape)))
c, s = torch.cos(phi), torch.sin(phi)
alpha, psi, chi = torch.rand((3,) + twiddle_core_shape
) * math.pi * 2
A = torch.exp(1.0j * (alpha + psi)) * c
B = torch.exp(1.0j * (alpha + chi)) * s
C = -torch.exp(1.0j * (alpha - chi)) * s
D = torch.exp(1.0j * (alpha - psi)) * c
with torch.no_grad():
twiddle.copy_(torch.stack((torch.stack((A, B), dim=-1),
torch.stack((C, D), dim=-1)), dim=-2))
elif self.init == 'identity':
twiddle_new = torch.eye(2, dtype=twiddle.dtype).reshape(1, 1, 1,
1, 2, 2)
twiddle_new = twiddle_new.expand(*twiddle.shape).contiguous()
with torch.no_grad():
twiddle.copy_(twiddle_new)
if self.bias is not None:
bound = 1 / math.sqrt(self.in_size)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input, transpose=False, conjugate=False, subtwiddle=False
):
"""
Parameters:
input: (batch, *, in_size)
transpose: whether the butterfly matrix should be transposed.
conjugate: whether the butterfly matrix should be conjugated.
subtwiddle: allow using only part of the parameters for smaller input.
Could be useful for weight sharing.
out_size is set to self.nstacks * self.in_size_extended in this case
Return:
output: (batch, *, out_size)
"""
twiddle = self.twiddle if not self.complex else view_as_complex(self
.twiddle)
if not subtwiddle:
output = self.pre_process(input)
else:
log_n = int(math.ceil(math.log2(input.size(-1))))
n = 1 << log_n
output = self.pre_process(input, padded_size=n)
twiddle = twiddle[:, :, :log_n, :n // 2
] if self.increasing_stride else twiddle[:, :, -log_n:, :n // 2
]
if conjugate and self.complex:
twiddle = twiddle.conj()
if not transpose:
output = butterfly_multiply(twiddle, output, self.increasing_stride
)
else:
twiddle = twiddle.transpose(-1, -2).flip([1, 2])
last_increasing_stride = self.increasing_stride != ((self.
nblocks - 1) % 2 == 1)
output = butterfly_multiply(twiddle, output, not
last_increasing_stride)
if not subtwiddle:
return self.post_process(input, output)
else:
return self.post_process(input, output, out_size=output.size(-1))
def pre_process(self, input, padded_size=None):
if padded_size is None:
padded_size = self.in_size_extended
input_size = input.size(-1)
output = input.reshape(-1, input_size)
batch = output.shape[0]
if input_size != padded_size:
output = F.pad(output, (0, padded_size - input_size))
output = output.unsqueeze(1).expand(batch, self.nstacks, padded_size)
return output
def post_process(self, input, output, out_size=None):
if out_size is None:
out_size = self.out_size
batch = output.shape[0]
output = output.view(batch, self.nstacks * output.size(-1))
out_size_extended = 1 << int(math.ceil(math.log2(output.size(-1))))
if out_size != out_size_extended:
output = output[:, :out_size]
if self.bias is not None:
bias = self.bias if not self.complex else view_as_complex(self.bias
)
output = output + bias[:out_size]
return output.view(*input.size()[:-1], out_size)
def extra_repr(self):
s = (
'in_size={}, out_size={}, bias={}, complex={}, increasing_stride={}, init={}, nblocks={}'
.format(self.in_size, self.out_size, self.bias is not None,
self.complex, self.increasing_stride, self.init, self.nblocks))
return s
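# Hedged usage sketch (added annotation, not part of the original repo):
# Butterfly is meant as a drop-in nn.Linear replacement with O(n log n) cost.
# Construction is runnable with the class above; forward() additionally needs
# butterfly_multiply and the complex-view helpers defined elsewhere in the repo.
if __name__ == '__main__':
    _layer = Butterfly(1024, 1024, bias=True)
    assert _layer.twiddle.shape == (1, 1, 10, 512, 2, 2)
    # _y = _layer(torch.randn(8, 1024))  # -> (8, 1024) once the kernels import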
class MLPNew(nn.Module):
def __init__(self, method='linear', **kwargs):
super().__init__()
if method == 'linear':
def make_layer(name):
return self.add_module(name, nn.Linear(1024, 1024, bias=True))
elif method == 'butterfly':
def make_layer(name):
return self.add_module(name, Butterfly(1024, 1024, bias=
True, **kwargs))
elif method == 'low-rank':
def make_layer(name):
return self.add_module(name, nn.Sequential(nn.Linear(1024,
kwargs['rank'], bias=False), nn.Linear(kwargs['rank'],
1024, bias=True)))
elif method == 'toeplitz':
def make_layer(name):
return self.add_module(name, sl.ToeplitzLikeC(layer_size=
1024, bias=True, **kwargs))
else:
assert False, f'method {method} not supported'
make_layer('fc10')
make_layer('fc11')
make_layer('fc12')
make_layer('fc2')
make_layer('fc3')
self.logits = nn.Linear(1024, 10)
def forward(self, input_0):
primals_2 = self.fc10.weight
primals_3 = self.fc10.bias
primals_4 = self.fc11.weight
primals_5 = self.fc11.bias
primals_6 = self.fc12.weight
primals_7 = self.fc12.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_12 = self.logits.weight
primals_13 = self.logits.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| sfox14/butterfly | MLP | false | 16,424 | ["Apache-2.0"] | 52 | 13cc15cee5bdb7adaf376219aaf20fab0459e9ef | https://github.com/sfox14/butterfly/tree/13cc15cee5bdb7adaf376219aaf20fab0459e9ef |
ActorCritic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vh/cvhowampoosezwy5zm5vfkdmhzrvsn2u2gxpn4cchngk4b74ympu.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 128), (128, 1))
assert_size_stride(primals_5, (2, ), (1, ))
assert_size_stride(primals_6, (1, 128), (128, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 8192, grid=grid(8192), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [action_scores], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [state_values], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf5, 128, grid=grid(128), stream=stream0)
del buf2
return (buf5, reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), buf5, primals_6, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions import Categorical
class ActorCritic(nn.Module):
def __init__(self):
super().__init__()
self.affine1 = nn.Linear(4, 128)
self.action_head = nn.Linear(128, 2)
self.value_head = nn.Linear(128, 1)
def forward(self, x):
x = F.relu(self.affine1(x))
action_scores = self.action_head(x)
state_values = self.value_head(x)
return F.softmax(action_scores, dim=-1), state_values
def select_action(self, state, values, select_props):
state = torch.from_numpy(state).float()
props, value = self(Variable(state))
dist = Categorical(props)
action = dist.sample()
log_props = dist.log_prob(action)
values.append(value)
select_props.append(log_props)
return action.data[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
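# Hedged usage sketch (added annotation; the random array stands in for a real
# CartPole-style observation): one forward pass yields both the 2-way action
# distribution and the state value, and select_action records the value and
# the log-probability for the actor-critic update.
if __name__ == '__main__':
    import numpy as np
    _model = ActorCritic()
    _values, _log_props = [], []
    _action = _model.select_action(np.random.rand(1, 4).astype(np.float32),
        _values, _log_props)
    assert int(_action) in (0, 1) and len(_values) == len(_log_props) == 1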
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.autograd import Variable
from torch.distributions import Categorical
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
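# Annotation (added note): x1 = x2 // 2 addresses a row of two action logits;
# each lane reloads both entries, subtracts their max, and exponentiates, so
# the kernel computes a numerically stable 2-way softmax without any
# cross-lane reduction.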
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 128), (128, 1))
assert_size_stride(primals_5, (2,), (1,))
assert_size_stride(primals_6, (1, 128), (128, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf6, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 2), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128),
0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused__softmax_1[grid(128)](buf2, buf5, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
return buf5, reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0
), buf5, primals_6, primals_4, buf6
class ActorCriticNew(nn.Module):
def __init__(self):
super().__init__()
self.affine1 = nn.Linear(4, 128)
self.action_head = nn.Linear(128, 2)
self.value_head = nn.Linear(128, 1)
def select_action(self, state, values, select_props):
state = torch.from_numpy(state).float()
props, value = self(Variable(state))
dist = Categorical(props)
action = dist.sample()
log_props = dist.log_prob(action)
values.append(value)
select_props.append(log_props)
return action.data[0]
def forward(self, input_0):
primals_1 = self.affine1.weight
primals_2 = self.affine1.bias
primals_4 = self.action_head.weight
primals_5 = self.action_head.bias
primals_6 = self.value_head.weight
primals_7 = self.value_head.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
| shinoyuki222/torch-light | ActorCritic | false | 16,425 | ["MIT"] | 310 | 4799805d9bcae82a9f12a574dcf9fdd838c92ee9 | https://github.com/shinoyuki222/torch-light/tree/4799805d9bcae82a9f12a574dcf9fdd838c92ee9 |
AdaIN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vx/cvx2mkof7lntpjrpc65myom6nddnwk6dg2t67qoqxszlsadf3yea.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused__native_batch_norm_legit_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qk/cqktolenkjfb466t5wvtfndff62234jne6ubyyx4k2jd25jn3uar.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.7071067811865476), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fl/cflgvbhseauqw5xxppuut3763myia6ggd4h7iojiz3edaac5oyx4.py
# Topologically Sorted Source Nodes: [mul_2, add_2], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# add_2 => add_3
# mul_2 => mul_3
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %view_1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_3), kwargs = {})
triton_poi_fused_add_mul_2 = async_compile.triton('triton_poi_fused_add_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = (xindex // 256)
x4 = xindex % 16
x0 = xindex % 4
x5 = xindex % 256
x2 = (xindex // 16) % 16
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x4 + (16*x3)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + (x4 + (16*x3)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 - tmp4
tmp7 = tmp5 * tmp6
tmp8 = tmp2 * tmp7
tmp11 = tmp9 + tmp10
tmp12 = tmp8 + tmp11
tl.store(out_ptr0 + (x6), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf3 = reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._native_batch_norm_legit]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_0.run(buf3, primals_1, buf0, 16, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(primals_2, buf4, 256, grid=grid(256), stream=stream0)
del primals_2
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf5)
del primals_3
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf6)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, add_2], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_2.run(buf5, primals_4, primals_1, buf0, buf3, buf6, primals_6, buf7, 4096, grid=grid(4096), stream=stream0)
del buf5
del buf6
del primals_4
del primals_6
return (buf7, primals_1, buf0, buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
import torch.nn
class WSLinear(nn.Module):
def __init__(self, in_features, out_features, gain=2):
super(WSLinear, self).__init__()
self.linear = nn.Linear(in_features, out_features)
self.scale = (gain / in_features) ** 0.5
self.bias = self.linear.bias
self.linear.bias = None
nn.init.normal_(self.linear.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
return self.linear(x * self.scale) + self.bias
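# Annotation (added note, an assumption about intent): scaling the *input* by
# sqrt(gain / in_features) at run time while keeping unit-normal weights is
# the equalized-learning-rate trick popularized by ProGAN/StyleGAN: the
# effective weight std matches He initialization, but every parameter sees the
# same per-step update magnitude under plain SGD/Adam.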
class AdaIN(nn.Module):
def __init__(self, channels, w_dim):
super().__init__()
self.instance_norm = nn.InstanceNorm2d(channels)
self.style_scale = WSLinear(w_dim, channels)
self.style_bias = WSLinear(w_dim, channels)
def forward(self, x, w):
x = self.instance_norm(x)
style_scale = self.style_scale(w).unsqueeze(2).unsqueeze(3)
style_bias = self.style_bias(w).unsqueeze(2).unsqueeze(3)
return style_scale * x + style_bias
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'w_dim': 4}]
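# Hedged usage sketch (added annotation): AdaIN whitens each feature map with
# InstanceNorm2d, then modulates it with a per-channel scale and bias predicted
# from the style code w. Note that get_inputs() above feeds a 4-D w, so the
# unsqueezed style tensors broadcast against x into the 6-D buf7 seen in the
# compiled call(); the usual style-vector case looks like this:
if __name__ == '__main__':
    _ada = AdaIN(channels=64, w_dim=512)
    _out = _ada(torch.rand(2, 64, 8, 8), torch.rand(2, 512))
    assert _out.shape == (2, 64, 8, 8)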
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.data
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
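# Annotation (added note): each of the 16 programs reduces one
# (batch, channel) slice over its 4x4 spatial block, producing the mean
# (tmp10) and rsqrt(var + 1e-5) (tmp21) -- exactly the InstanceNorm2d
# statistics, with no affine parameters.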
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex // 256
x4 = xindex % 16
x0 = xindex % 4
x5 = xindex % 256
x2 = xindex // 16 % 16
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), None, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x5, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + (x4 + 16 * x3), None, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 - tmp4
tmp7 = tmp5 * tmp6
tmp8 = tmp2 * tmp7
tmp11 = tmp9 + tmp10
tmp12 = tmp8 + tmp11
tl.store(out_ptr0 + x6, tmp12, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf3 = reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf1
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_0[grid(16)](buf3,
primals_1, buf0, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_1[grid(256)](primals_2, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf5)
del primals_3
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf6)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4,
1), torch.float32)
triton_poi_fused_add_mul_2[grid(4096)](buf5, primals_4, primals_1,
buf0, buf3, buf6, primals_6, buf7, 4096, XBLOCK=128, num_warps=
4, num_stages=1)
del buf5
del buf6
del primals_4
del primals_6
return buf7, primals_1, buf0, buf3, reinterpret_tensor(buf4, (64, 4), (
4, 1), 0)
class WSLinear(nn.Module):
def __init__(self, in_features, out_features, gain=2):
super(WSLinear, self).__init__()
self.linear = nn.Linear(in_features, out_features)
self.scale = (gain / in_features) ** 0.5
self.bias = self.linear.bias
self.linear.bias = None
nn.init.normal_(self.linear.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
return self.linear(x * self.scale) + self.bias
class AdaINNew(nn.Module):
def __init__(self, channels, w_dim):
super().__init__()
self.instance_norm = nn.InstanceNorm2d(channels)
self.style_scale = WSLinear(w_dim, channels)
self.style_bias = WSLinear(w_dim, channels)
def forward(self, input_0, input_1):
primals_4 = self.style_scale.bias
primals_3 = self.style_scale.linear.weight
primals_6 = self.style_bias.bias
primals_5 = self.style_bias.linear.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| shimon-c/Machine-Learning-Collection | AdaIN | false | 16,426 | ["MIT"] | 3,094 | ac5dcd03a40a08a8af7e1a67ade37f28cf88db43 | https://github.com/shimon-c/Machine-Learning-Collection/tree/ac5dcd03a40a08a8af7e1a67ade37f28cf88db43 |
BinaryFocalLossWithLogits | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/32/c32tdn4yej5mcd4zrs5qr56vp44zuc2xkprt2smkg5dq5eszjoqy.py
# Topologically Sorted Source Nodes: [probs, sub, add, pow_1, mul, mul_1, add_1, log, mul_2, add_2, pow_2, mul_3, sub_1, mul_4, sub_2, add_3, log_1, mul_5, loss_tmp], Original ATen: [aten.sigmoid, aten.rsub, aten.add, aten.pow, aten.mul, aten.log, aten.sub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# log => log
# log_1 => log_1
# loss_tmp => sub_3
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# pow_1 => pow_1
# pow_2 => pow_2
# probs => sigmoid
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sigmoid : [num_users=4] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-08), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, -4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %arg1_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, 1e-08), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %log), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, 1e-08), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_2, 2.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, -3), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg1_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %sub_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 1e-08), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_3,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %log_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_5), kwargs = {})
triton_poi_fused_add_log_mul_pow_rsub_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_add_log_mul_pow_rsub_sigmoid_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_log_mul_pow_rsub_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_log_mul_pow_rsub_sigmoid_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp9 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = 1e-08
tmp5 = tmp3 + tmp4
tmp6 = tmp5 * tmp5
tmp7 = -4.0
tmp8 = tmp6 * tmp7
tmp10 = tmp8 * tmp9
tmp11 = tmp1 + tmp4
tmp12 = tl_math.log(tmp11)
tmp13 = tmp10 * tmp12
tmp14 = tmp11 * tmp11
tmp15 = -3.0
tmp16 = tmp14 * tmp15
tmp17 = tmp2 - tmp9
tmp18 = tmp16 * tmp17
tmp19 = tl_math.log(tmp5)
tmp20 = tmp18 * tmp19
tmp21 = tmp13 - tmp20
tl.store(out_ptr0 + (x0), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [probs, sub, add, pow_1, mul, mul_1, add_1, log, mul_2, add_2, pow_2, mul_3, sub_1, mul_4, sub_2, add_3, log_1, mul_5, loss_tmp], Original ATen: [aten.sigmoid, aten.rsub, aten.add, aten.pow, aten.mul, aten.log, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_log_mul_pow_rsub_sigmoid_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def binary_focal_loss_with_logits(input: 'torch.Tensor', target:
'torch.Tensor', alpha: 'float'=0.25, gamma: 'float'=2.0, reduction:
'str'='none', eps: 'float'=1e-08) ->torch.Tensor:
"""Function that computes Binary Focal loss.
.. math::
\\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t)
where:
- :math:`p_t` is the model's estimated probability for each class.
Args:
input: input data tensor of arbitrary shape.
target: the target tensor with shape matching input.
alpha: Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`.
gamma: Focusing parameter :math:`\\gamma >= 0`.
reduction: Specifies the reduction to apply to the
output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction
will be applied, ``'mean'``: the sum of the output will be divided by
the number of elements in the output, ``'sum'``: the output will be
summed.
eps: for numerical stability when dividing.
Returns:
the computed loss.
Examples:
>>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'}
>>> logits = torch.tensor([[[6.325]],[[5.26]],[[87.49]]])
>>> labels = torch.tensor([[[1.]],[[1.]],[[0.]]])
>>> binary_focal_loss_with_logits(logits, labels, **kwargs)
tensor(4.6052)
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}')
if not len(input.shape) >= 2:
raise ValueError(
f'Invalid input shape, we expect BxCx*. Got: {input.shape}')
if input.size(0) != target.size(0):
raise ValueError(
f'Expected input batch_size ({input.size(0)}) to match target batch_size ({target.size(0)}).'
)
probs = torch.sigmoid(input)
loss_tmp = -alpha * torch.pow(1.0 - probs + eps, gamma
) * target * torch.log(probs + eps) - (1 - alpha) * torch.pow(probs +
eps, gamma) * (1.0 - target) * torch.log(1.0 - probs + eps)
if reduction == 'none':
loss = loss_tmp
elif reduction == 'mean':
loss = torch.mean(loss_tmp)
elif reduction == 'sum':
loss = torch.sum(loss_tmp)
else:
raise NotImplementedError(f'Invalid reduction mode: {reduction}')
return loss
class BinaryFocalLossWithLogits(nn.Module):
"""Criterion that computes Focal loss.
According to :cite:`lin2018focal`, the Focal loss is computed as follows:
.. math::
\\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t)
where:
- :math:`p_t` is the model's estimated probability for each class.
Args:
alpha: Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`.
gamma: Focusing parameter :math:`\\gamma >= 0`.
reduction: Specifies the reduction to apply to the
output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction
will be applied, ``'mean'``: the sum of the output will be divided by
the number of elements in the output, ``'sum'``: the output will be
summed.
Shape:
- Input: :math:`(N, 1, *)`.
- Target: :math:`(N, 1, *)`.
Examples:
>>> N = 1 # num_classes
>>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'}
>>> loss = BinaryFocalLossWithLogits(**kwargs)
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = loss(input, target)
>>> output.backward()
"""
def __init__(self, alpha: 'float', gamma: 'float'=2.0, reduction: 'str'
='none') ->None:
super().__init__()
self.alpha: 'float' = alpha
self.gamma: 'float' = gamma
self.reduction: 'str' = reduction
self.eps: 'float' = 1e-08
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
return binary_focal_loss_with_logits(input, target, self.alpha,
self.gamma, self.reduction, self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'alpha': 4}]
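# Hedged sanity-check sketch (added annotation; mirrors the docstring example):
# the confident wrong prediction (logit 87.49, label 0) keeps its full penalty
# because the modulating factor (1 - p_t)^gamma is ~1, while the two confident
# correct positives contribute almost nothing, so the mean is roughly
# 0.75 * -log(1e-8) / 3 ~= 4.605.
if __name__ == '__main__':
    _logits = torch.tensor([[[6.325]], [[5.26]], [[87.49]]])
    _labels = torch.tensor([[[1.0]], [[1.0]], [[0.0]]])
    _loss = binary_focal_loss_with_logits(_logits, _labels, alpha=0.25,
        gamma=2.0, reduction='mean')
    assert torch.isclose(_loss, torch.tensor(4.6052), atol=0.001)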
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_log_mul_pow_rsub_sigmoid_sub_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp9 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = 1e-08
tmp5 = tmp3 + tmp4
tmp6 = tmp5 * tmp5
tmp7 = -4.0
tmp8 = tmp6 * tmp7
tmp10 = tmp8 * tmp9
tmp11 = tmp1 + tmp4
tmp12 = tl_math.log(tmp11)
tmp13 = tmp10 * tmp12
tmp14 = tmp11 * tmp11
tmp15 = -3.0
tmp16 = tmp14 * tmp15
tmp17 = tmp2 - tmp9
tmp18 = tmp16 * tmp17
tmp19 = tl_math.log(tmp5)
tmp20 = tmp18 * tmp19
tmp21 = tmp13 - tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
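# Annotation (added note): the kernel above is specialized to the
# hyperparameters from get_init_inputs(): tmp7 = -4.0 is -alpha and
# tmp15 = -3.0 is (1 - alpha) for alpha=4, while gamma=2 shows up as the
# squared terms tmp6 and tmp14.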
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_log_mul_pow_rsub_sigmoid_sub_0[grid(256)](arg0_1,
arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
def binary_focal_loss_with_logits(input: 'torch.Tensor', target:
'torch.Tensor', alpha: 'float'=0.25, gamma: 'float'=2.0, reduction:
'str'='none', eps: 'float'=1e-08) ->torch.Tensor:
"""Function that computes Binary Focal loss.
.. math::
\\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t)
where:
- :math:`p_t` is the model's estimated probability for each class.
Args:
input: input data tensor of arbitrary shape.
target: the target tensor with shape matching input.
alpha: Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`.
gamma: Focusing parameter :math:`\\gamma >= 0`.
reduction: Specifies the reduction to apply to the
output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction
will be applied, ``'mean'``: the sum of the output will be divided by
the number of elements in the output, ``'sum'``: the output will be
summed.
eps: for numerical stability when dividing.
Returns:
the computed loss.
Examples:
>>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'}
>>> logits = torch.tensor([[[6.325]],[[5.26]],[[87.49]]])
>>> labels = torch.tensor([[[1.]],[[1.]],[[0.]]])
>>> binary_focal_loss_with_logits(logits, labels, **kwargs)
tensor(4.6052)
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}')
if not len(input.shape) >= 2:
raise ValueError(
f'Invalid input shape, we expect BxCx*. Got: {input.shape}')
if input.size(0) != target.size(0):
raise ValueError(
f'Expected input batch_size ({input.size(0)}) to match target batch_size ({target.size(0)}).'
)
probs = torch.sigmoid(input)
loss_tmp = -alpha * torch.pow(1.0 - probs + eps, gamma
) * target * torch.log(probs + eps) - (1 - alpha) * torch.pow(probs +
eps, gamma) * (1.0 - target) * torch.log(1.0 - probs + eps)
if reduction == 'none':
loss = loss_tmp
elif reduction == 'mean':
loss = torch.mean(loss_tmp)
elif reduction == 'sum':
loss = torch.sum(loss_tmp)
else:
raise NotImplementedError(f'Invalid reduction mode: {reduction}')
return loss
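# Hedged sanity check (illustrative, not from the original repo; assumes a
# CUDA device). Because the fused kernel bakes in alpha=4.0, gamma=2.0 and
# reduction='none', it should match the eager reference with those arguments:
#
#   x = torch.randn(4, 4, 4, 4, device='cuda')
#   t = torch.rand(4, 4, 4, 4, device='cuda')
#   ref = binary_focal_loss_with_logits(x, t, alpha=4.0, gamma=2.0)
#   out, = call([x, t])
#   torch.testing.assert_close(out, ref)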
class BinaryFocalLossWithLogitsNew(nn.Module):
"""Criterion that computes Focal loss.
According to :cite:`lin2018focal`, the Focal loss is computed as follows:
.. math::
\\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t)
where:
- :math:`p_t` is the model's estimated probability for each class.
Args:
alpha: Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`.
gamma: Focusing parameter :math:`\\gamma \\geq 0`.
reduction: Specifies the reduction to apply to the
output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction
will be applied, ``'mean'``: the sum of the output will be divided by
the number of elements in the output, ``'sum'``: the output will be
summed.
Shape:
- Input: :math:`(N, 1, *)`.
- Target: :math:`(N, 1, *)`.
Examples:
>>> N = 1 # num_classes
>>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'}
>>> loss = BinaryFocalLossWithLogits(**kwargs)
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = loss(input, target)
>>> output.backward()
"""
def __init__(self, alpha: 'float', gamma: 'float'=2.0, reduction: 'str'
='none') ->None:
super().__init__()
self.alpha: 'float' = alpha
self.gamma: 'float' = gamma
self.reduction: 'str' = reduction
self.eps: 'float' = 1e-08
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
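        # Note (observation, not from the original source): self.alpha,
        # self.gamma and self.reduction are unused on this path; the compiled
        # call() above was specialized with alpha=4.0, gamma=2.0 and
        # reduction='none' at trace time.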
return output[0]
| shiyangc-intusurg/kornia | BinaryFocalLossWithLogits | false | 16,427 | [
"ECL-2.0",
"Apache-2.0"
] | 4,894 | 2e2512f8f20d300d8732e5873e16336b5a01f3bd | https://github.com/shiyangc-intusurg/kornia/tree/2e2512f8f20d300d8732e5873e16336b5a01f3bd |
h_sigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/rm/crm72qx6rsghhfnyhuw5kfn2z6uwrgv7td4c2blpcrynbklz2exg.py
# Topologically Sorted Source Nodes: [add, hardtanh, mul, truediv], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# hardtanh => clamp_max, clamp_min
# mul => mul
# truediv => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0.0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 6), kwargs = {})
triton_poi_fused_add_div_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = tmp6 * tmp7
tmp9 = 0.16666666666666666
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, hardtanh, mul, truediv], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class h_sigmoid(nn.Module):
def __init__(self, inplace=True, h_max=1):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
self.h_max = h_max
def forward(self, x):
return self.relu(x + 3) * self.h_max / 6
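def _demo_h_sigmoid():
    # Hedged illustration (helper name is ours, not from the repo):
    # h_sigmoid(x) = ReLU6(x + 3) * h_max / 6, a piecewise-linear surrogate
    # for sigmoid; with h_max=1 it matches torch.nn.Hardsigmoid.
    m = h_sigmoid()
    x = torch.tensor([-4.0, 0.0, 4.0])
    return m(x)  # tensor([0.0000, 0.5000, 1.0000])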
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = tmp6 * tmp7
tmp9 = 0.16666666666666666
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
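# Note on the constants above: tmp7 = 1.0 is the module's h_max folded in,
# and tmp9 = 0.16666... is 1/6, so the kernel computes
# clamp(x + 3, 0, 6) * h_max / 6 in a single pass.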
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class h_sigmoidNew(nn.Module):
def __init__(self, inplace=True, h_max=1):
super(h_sigmoidNew, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
self.h_max = h_max
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| shivamsnaik/dynamic-head-microsoft-fork | h_sigmoid | false | 16,428 | [
"MIT"
] | 494 | 0f337eec44d262df2517be8f5617477c0b092fcc | https://github.com/shivamsnaik/dynamic-head-microsoft-fork/tree/0f337eec44d262df2517be8f5617477c0b092fcc |
DilatedGatedConv1D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xd/cxdkgesb6qc4hbcvbeerj4gwfolzo6uujsc7nr7gmmdljjywjmd6.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/du/cduiwo37rwoxi2gcv7vm5njuipbt3s73osilncnhdi4hqe6v73w3.py
# Topologically Sorted Source Nodes: [dropout, x1_1, sub, mul, mul_1, add], Original ATen: [aten.clone, aten.sigmoid, aten.rsub, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# dropout => clone
# mul => mul
# mul_1 => mul_1
# sub => sub
# x1_1 => sigmoid
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_3,), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%clone,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sub), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_6, %sigmoid), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_clone_mul_rsub_sigmoid_2 = async_compile.triton('triton_poi_fused_add_clone_mul_rsub_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clone_mul_rsub_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clone_mul_rsub_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + (4*y0) + (32*y1)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (16 + x2 + (4*y0) + (32*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = 1.0
tmp4 = tmp3 - tmp2
tmp5 = tmp0 * tmp4
tmp7 = tmp6 * tmp2
tmp8 = tmp5 + tmp7
tl.store(out_ptr0 + (y0 + (4*x2) + (16*y1)), tmp8, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (8, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 8, 4), (32, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 128, grid=grid(128), stream=stream0)
del primals_3
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [dropout, x1_1, sub, mul, mul_1, add], Original ATen: [aten.clone, aten.sigmoid, aten.rsub, aten.mul, aten.add]
triton_poi_fused_add_clone_mul_rsub_sigmoid_2.run(primals_1, buf2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
return (buf3, primals_1, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DilatedGatedConv1D(nn.Module):
def __init__(self, dilation_rate, dim):
super().__init__()
self.dim = dim
self.dropout = nn.Dropout(p=0.1)
self.cnn = nn.Conv1d(dim, dim * 2, 3, padding=dilation_rate,
dilation=dilation_rate)
def forward(self, x):
residual = x
x = self.cnn(x.transpose(1, 2)).transpose(1, 2)
x1, x2 = x[:, :, :self.dim], x[:, :, self.dim:]
x1 = torch.sigmoid(self.dropout(x1))
return residual * (1 - x1) + x2 * x1
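def _demo_dilated_gated_conv():
    # Hedged usage sketch (helper name is ours, not from the repo): the conv
    # emits 2*dim channels, split into a gate x1 and a candidate x2; the
    # output is the highway-style mix residual*(1-g) + x2*g with g=sigmoid(x1).
    m = DilatedGatedConv1D(dilation_rate=1, dim=4)
    x = torch.rand(2, 5, 4)  # (batch, seq_len, dim)
    return m(x).shape  # torch.Size([2, 5, 4])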
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dilation_rate': 1, 'dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_clone_mul_rsub_sigmoid_2(in_ptr0, in_ptr1,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4 * y0 + 32 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (16 + x2 + 4 * y0 + 32 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = 1.0
tmp4 = tmp3 - tmp2
tmp5 = tmp0 * tmp4
tmp7 = tmp6 * tmp2
tmp8 = tmp5 + tmp7
tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp8, xmask & ymask)
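# Note: buf2 holds the conv output with shape (4, 8, 4) and channel stride 4,
# so in_ptr1 + (x2 + 4*y0 + 32*y1) reads gate channels 0..3 (x1) while the
# +16 offset reads candidate channels 4..7 (x2), mirroring the eager split
# x[:, :, :dim] / x[:, :, dim:].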
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 8, 4), (32, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(128)](buf2, primals_3, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = buf0
del buf0
triton_poi_fused_add_clone_mul_rsub_sigmoid_2[grid(16, 4)](primals_1,
buf2, buf3, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
return buf3, primals_1, primals_2, buf2
class DilatedGatedConv1DNew(nn.Module):
def __init__(self, dilation_rate, dim):
super().__init__()
self.dim = dim
self.dropout = nn.Dropout(p=0.1)
self.cnn = nn.Conv1d(dim, dim * 2, 3, padding=dilation_rate,
dilation=dilation_rate)
def forward(self, input_0):
primals_2 = self.cnn.weight
primals_3 = self.cnn.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| shinoyuki222/torch-light | DilatedGatedConv1D | false | 16,429 | [
"MIT"
] | 310 | 4799805d9bcae82a9f12a574dcf9fdd838c92ee9 | https://github.com/shinoyuki222/torch-light/tree/4799805d9bcae82a9f12a574dcf9fdd838c92ee9 |
MaskedMSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3w/c3wz4ocb4lxjauhilza5zv4flhxivvvbzy7sau5kuhlocrkqzmv6.py
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul, sum_1, loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, minimum, mul_1, neg, sub, sub_1, sub_2, sum_1
# loss => div
# mul => mul
# sum_1 => sum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg2_1), kwargs = {})
# %mul : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %mul), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %mul), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %sub_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_2,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg1_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_div_mul_sum_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_div_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_div_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_div_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp4 = tl.load(in_ptr2 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 * tmp5
tmp7 = 0.0
tmp8 = triton_helpers.minimum(tmp7, tmp5)
tmp9 = tl_math.abs(tmp5)
tmp10 = -tmp9
tmp11 = tl_math.exp(tmp10)
tmp12 = libdevice.log1p(tmp11)
tmp13 = tmp8 - tmp12
tmp14 = tmp6 - tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = tl.broadcast_to(tmp4, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp17 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul, sum_1, loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_div_mul_sum_0.run(buf2, arg2_1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MaskedMSELoss(nn.Module):
def __init__(self):
super(MaskedMSELoss, self).__init__()
self.loss = nn.BCEWithLogitsLoss(reduction='sum')
def forward(self, pred, target, mask):
"""
pred -> batch*seq_len
target -> batch*seq_len
mask -> batch*seq_len
"""
loss = self.loss(pred * mask, target) / torch.sum(mask)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_div_mul_sum_0(in_out_ptr0
, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp4 = tl.load(in_ptr2 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 * tmp5
tmp7 = 0.0
tmp8 = triton_helpers.minimum(tmp7, tmp5)
tmp9 = tl_math.abs(tmp5)
tmp10 = -tmp9
tmp11 = tl_math.exp(tmp10)
tmp12 = libdevice.log1p(tmp11)
tmp13 = tmp8 - tmp12
tmp14 = tmp6 - tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = tl.broadcast_to(tmp4, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp17 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
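# Note: tmp6 - tmp13 above is the numerically stable BCE-with-logits form
# (1 - t)*z - (min(0, z) - log1p(exp(-|z|))), algebraically equal to the
# textbook max(z, 0) - z*t + log(1 + exp(-|z|)); the two tl.sum reductions
# then produce sum(loss) / sum(mask) in a single fused kernel.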
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_div_mul_sum_0[grid(1)
](buf2, arg2_1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class MaskedMSELossNew(nn.Module):
def __init__(self):
super(MaskedMSELossNew, self).__init__()
self.loss = nn.BCEWithLogitsLoss(reduction='sum')
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| shrx11/M2H2-dataset | MaskedMSELoss | false | 16,430 | [
"MIT"
] | 206 | 8be80041fc0de04f2a6113e305f09f3b8d6279f4 | https://github.com/shrx11/M2H2-dataset/tree/8be80041fc0de04f2a6113e305f09f3b8d6279f4 |
CombineTensorPatches | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/aa/caa6fq266iykltiy742bcvfllwt3rns2bw3nxkbd4mv4ax22o2cp.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem_4, %getitem_5, %getitem_6, %getitem_7], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16) % 16
x2 = (xindex // 256) % 4
x3 = (xindex // 1024)
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp7 & tmp4
tmp9 = tl.load(in_ptr0 + ((4*x1) + (16*x2) + (1024*x3) + x0), tmp8, eviction_policy='evict_last', other=0.0)
tmp10 = tmp5 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp5 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tmp13 & tmp4
tmp15 = tl.load(in_ptr0 + (256 + (4*((-4) + x1)) + (16*x2) + (1024*x3) + x0), tmp14, eviction_policy='evict_last', other=0.0)
tmp16 = tmp5 >= tmp11
tmp17 = tl.full([1], 12, tl.int64)
tmp18 = tmp5 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tmp19 & tmp4
tmp21 = tl.load(in_ptr0 + (512 + (4*((-8) + x1)) + (16*x2) + (1024*x3) + x0), tmp20, eviction_policy='evict_last', other=0.0)
tmp22 = tmp5 >= tmp17
tmp23 = tl.full([1], 16, tl.int64)
tmp24 = tmp5 < tmp23
tmp25 = tmp22 & tmp4
tmp26 = tl.load(in_ptr0 + (768 + (4*((-12) + x1)) + (16*x2) + (1024*x3) + x0), tmp25, eviction_policy='evict_last', other=0.0)
tmp27 = tl.where(tmp19, tmp21, tmp26)
tmp28 = tl.where(tmp13, tmp15, tmp27)
tmp29 = tl.where(tmp7, tmp9, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp4, tmp29, tmp30)
tmp32 = tmp0 >= tmp3
tmp33 = tmp0 < tmp11
tmp34 = tmp32 & tmp33
tmp35 = tmp7 & tmp34
tmp36 = tl.load(in_ptr0 + (64 + (4*x1) + (16*x2) + (1024*x3) + ((-4) + x0)), tmp35, eviction_policy='evict_last', other=0.0)
tmp37 = tmp13 & tmp34
tmp38 = tl.load(in_ptr0 + (320 + (4*((-4) + x1)) + (16*x2) + (1024*x3) + ((-4) + x0)), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tmp19 & tmp34
tmp40 = tl.load(in_ptr0 + (576 + (4*((-8) + x1)) + (16*x2) + (1024*x3) + ((-4) + x0)), tmp39, eviction_policy='evict_last', other=0.0)
tmp41 = tmp22 & tmp34
tmp42 = tl.load(in_ptr0 + (832 + (4*((-12) + x1)) + (16*x2) + (1024*x3) + ((-4) + x0)), tmp41, eviction_policy='evict_last', other=0.0)
tmp43 = tl.where(tmp19, tmp40, tmp42)
tmp44 = tl.where(tmp13, tmp38, tmp43)
tmp45 = tl.where(tmp7, tmp36, tmp44)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp34, tmp45, tmp46)
tmp48 = tmp0 >= tmp11
tmp49 = tmp0 < tmp17
tmp50 = tmp48 & tmp49
tmp51 = tmp7 & tmp50
tmp52 = tl.load(in_ptr0 + (128 + (4*x1) + (16*x2) + (1024*x3) + ((-8) + x0)), tmp51, eviction_policy='evict_last', other=0.0)
tmp53 = tmp13 & tmp50
tmp54 = tl.load(in_ptr0 + (384 + (4*((-4) + x1)) + (16*x2) + (1024*x3) + ((-8) + x0)), tmp53, eviction_policy='evict_last', other=0.0)
tmp55 = tmp19 & tmp50
tmp56 = tl.load(in_ptr0 + (640 + (4*((-8) + x1)) + (16*x2) + (1024*x3) + ((-8) + x0)), tmp55, eviction_policy='evict_last', other=0.0)
tmp57 = tmp22 & tmp50
tmp58 = tl.load(in_ptr0 + (896 + (4*((-12) + x1)) + (16*x2) + (1024*x3) + ((-8) + x0)), tmp57, eviction_policy='evict_last', other=0.0)
tmp59 = tl.where(tmp19, tmp56, tmp58)
tmp60 = tl.where(tmp13, tmp54, tmp59)
tmp61 = tl.where(tmp7, tmp52, tmp60)
tmp62 = tl.full(tmp61.shape, 0.0, tmp61.dtype)
tmp63 = tl.where(tmp50, tmp61, tmp62)
tmp64 = tmp0 >= tmp17
tmp65 = tmp0 < tmp23
tmp66 = tmp7 & tmp64
tmp67 = tl.load(in_ptr0 + (192 + (4*x1) + (16*x2) + (1024*x3) + ((-12) + x0)), tmp66, eviction_policy='evict_last', other=0.0)
tmp68 = tmp13 & tmp64
tmp69 = tl.load(in_ptr0 + (448 + (4*((-4) + x1)) + (16*x2) + (1024*x3) + ((-12) + x0)), tmp68, eviction_policy='evict_last', other=0.0)
tmp70 = tmp19 & tmp64
tmp71 = tl.load(in_ptr0 + (704 + (4*((-8) + x1)) + (16*x2) + (1024*x3) + ((-12) + x0)), tmp70, eviction_policy='evict_last', other=0.0)
tmp72 = tmp22 & tmp64
tmp73 = tl.load(in_ptr0 + (960 + (4*((-12) + x1)) + (16*x2) + (1024*x3) + ((-12) + x0)), tmp72, eviction_policy='evict_last', other=0.0)
tmp74 = tl.where(tmp19, tmp71, tmp73)
tmp75 = tl.where(tmp13, tmp69, tmp74)
tmp76 = tl.where(tmp7, tmp67, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp64, tmp76, tmp77)
tmp79 = tl.where(tmp50, tmp63, tmp78)
tmp80 = tl.where(tmp34, tmp47, tmp79)
tmp81 = tl.where(tmp4, tmp31, tmp80)
tl.store(out_ptr0 + (x4), tmp81, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 16, 16), (1024, 1024, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 4096, grid=grid(4096), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 4, 16, 16), (1024, 256, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Optional
from typing import Tuple
import torch.nn as nn
from typing import Union
from torch.nn.modules.utils import _pair
def combine_tensor_patches(patches: 'torch.Tensor', window_size:
'Tuple[int, int]'=(4, 4), stride: 'Tuple[int, int]'=(4, 4), unpadding:
'Optional[Tuple[int, int, int, int]]'=None) ->torch.Tensor:
"""Restore input from patches.
Args:
patches: patched tensor.
window_size: the size of the sliding window and the output patch size.
stride: stride of the sliding window.
unpadding: remove the padding added to both sides of the input.
Shape:
- Input: :math:`(B, N, C, H_{out}, W_{out})`
- Output: :math:`(B, C, H, W)`
Example:
>>> out = extract_tensor_patches(torch.arange(16).view(1, 1, 4, 4), window_size=(2, 2), stride=(2, 2))
>>> combine_tensor_patches(out, window_size=(2, 2), stride=(2, 2))
tensor([[[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]]]])
"""
if stride[0] != window_size[0] or stride[1] != window_size[1]:
raise NotImplementedError(
f'Only stride == window_size is supported. Got {stride} and {window_size}. Please feel free to drop a PR to Kornia GitHub.'
)
if unpadding is not None:
window_size = window_size[0] + (unpadding[0] + unpadding[1]
) // window_size[0], window_size[1] + (unpadding[2] + unpadding[3]
) // window_size[1]
patches_tensor = patches.view(-1, window_size[0], window_size[1], *
patches.shape[-3:])
restored_tensor = torch.cat(torch.chunk(patches_tensor, window_size[0],
dim=1), -2).squeeze(1)
restored_tensor = torch.cat(torch.chunk(restored_tensor, window_size[1],
dim=1), -1).squeeze(1)
if unpadding is not None:
restored_tensor = torch.nn.functional.pad(restored_tensor, [(-i) for
i in unpadding])
return restored_tensor
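# Note on the reshuffle above: the view exposes the patch grid as dims 1 and
# 2; the first chunk/cat stacks grid rows along the height axis (-2) and the
# second stacks grid columns along the width axis (-1). For this
# non-overlapping (stride == window_size) case that exactly inverts the
# patch extraction.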
class CombineTensorPatches(nn.Module):
"""Module that combine patches from tensors.
In the simplest case, the output value of the operator with input size
:math:`(B, N, C, H_{out}, W_{out})` is :math:`(B, C, H, W)`.
where
- :math:`B` is the batch size.
- :math:`N` denotes the total number of extracted patches stacked in
  left-right and top-bottom order.
- :math:`C` denotes the number of input channels.
- :math:`H`, :math:`W` denote the input height and width in pixels.
- :math:`H_{out}`, :math:`W_{out}` denote the patch size
  defined in the function signature.
* :attr:`window_size` is the size of the sliding window and controls the
shape of the output tensor and defines the shape of the output patch.
* :attr:`stride` controls the stride to apply to the sliding window and
regulates the overlapping between the extracted patches.
* :attr:`padding` controls the amount of implicit zero-padding on both
  sides of each dimension.
The parameters :attr:`window_size`, :attr:`stride` and :attr:`padding` can
be either:
- a single ``int`` -- in which case the same value is used for the
height and width dimension.
- a ``tuple`` of two ints -- in which case, the first `int` is used for
the height dimension, and the second `int` for the width dimension.
Args:
patches: patched tensor.
window_size: the size of the sliding window and the output patch size.
unpadding: remove the padding added to both sides of the input.
Shape:
- Input: :math:`(B, N, C, H_{out}, W_{out})`
- Output: :math:`(B, C, H, W)`
Example:
>>> out = extract_tensor_patches(torch.arange(16).view(1, 1, 4, 4), window_size=(2, 2), stride=(2, 2))
>>> combine_tensor_patches(out, window_size=(2, 2), stride=(2, 2))
tensor([[[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]]]])
"""
def __init__(self, window_size: 'Union[int, Tuple[int, int]]',
unpadding: 'Union[int, Tuple[int, int]]'=0) ->None:
super().__init__()
self.window_size: 'Tuple[int, int]' = _pair(window_size)
pad: 'Tuple[int, int]' = _pair(unpadding)
self.unpadding: 'Tuple[int, int, int, int]' = (pad[0], pad[0], pad[
1], pad[1])
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return combine_tensor_patches(input, self.window_size, stride=self.
window_size, unpadding=self.unpadding)
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'window_size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from typing import Optional
from typing import Tuple
import torch.nn as nn
from typing import Union
from torch.nn.modules.utils import _pair
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16 % 16
x2 = xindex // 256 % 4
x3 = xindex // 1024
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x1
tmp7 = tmp5 < tmp3
tmp8 = tmp7 & tmp4
tmp9 = tl.load(in_ptr0 + (4 * x1 + 16 * x2 + 1024 * x3 + x0), tmp8,
eviction_policy='evict_last', other=0.0)
tmp10 = tmp5 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp5 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tmp13 & tmp4
tmp15 = tl.load(in_ptr0 + (256 + 4 * (-4 + x1) + 16 * x2 + 1024 * x3 +
x0), tmp14, eviction_policy='evict_last', other=0.0)
tmp16 = tmp5 >= tmp11
tmp17 = tl.full([1], 12, tl.int64)
tmp18 = tmp5 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tmp19 & tmp4
tmp21 = tl.load(in_ptr0 + (512 + 4 * (-8 + x1) + 16 * x2 + 1024 * x3 +
x0), tmp20, eviction_policy='evict_last', other=0.0)
tmp22 = tmp5 >= tmp17
tl.full([1], 16, tl.int64)
tmp25 = tmp22 & tmp4
tmp26 = tl.load(in_ptr0 + (768 + 4 * (-12 + x1) + 16 * x2 + 1024 * x3 +
x0), tmp25, eviction_policy='evict_last', other=0.0)
tmp27 = tl.where(tmp19, tmp21, tmp26)
tmp28 = tl.where(tmp13, tmp15, tmp27)
tmp29 = tl.where(tmp7, tmp9, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp4, tmp29, tmp30)
tmp32 = tmp0 >= tmp3
tmp33 = tmp0 < tmp11
tmp34 = tmp32 & tmp33
tmp35 = tmp7 & tmp34
tmp36 = tl.load(in_ptr0 + (64 + 4 * x1 + 16 * x2 + 1024 * x3 + (-4 + x0
)), tmp35, eviction_policy='evict_last', other=0.0)
tmp37 = tmp13 & tmp34
tmp38 = tl.load(in_ptr0 + (320 + 4 * (-4 + x1) + 16 * x2 + 1024 * x3 +
(-4 + x0)), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tmp19 & tmp34
tmp40 = tl.load(in_ptr0 + (576 + 4 * (-8 + x1) + 16 * x2 + 1024 * x3 +
(-4 + x0)), tmp39, eviction_policy='evict_last', other=0.0)
tmp41 = tmp22 & tmp34
tmp42 = tl.load(in_ptr0 + (832 + 4 * (-12 + x1) + 16 * x2 + 1024 * x3 +
(-4 + x0)), tmp41, eviction_policy='evict_last', other=0.0)
tmp43 = tl.where(tmp19, tmp40, tmp42)
tmp44 = tl.where(tmp13, tmp38, tmp43)
tmp45 = tl.where(tmp7, tmp36, tmp44)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp34, tmp45, tmp46)
tmp48 = tmp0 >= tmp11
tmp49 = tmp0 < tmp17
tmp50 = tmp48 & tmp49
tmp51 = tmp7 & tmp50
tmp52 = tl.load(in_ptr0 + (128 + 4 * x1 + 16 * x2 + 1024 * x3 + (-8 +
x0)), tmp51, eviction_policy='evict_last', other=0.0)
tmp53 = tmp13 & tmp50
tmp54 = tl.load(in_ptr0 + (384 + 4 * (-4 + x1) + 16 * x2 + 1024 * x3 +
(-8 + x0)), tmp53, eviction_policy='evict_last', other=0.0)
tmp55 = tmp19 & tmp50
tmp56 = tl.load(in_ptr0 + (640 + 4 * (-8 + x1) + 16 * x2 + 1024 * x3 +
(-8 + x0)), tmp55, eviction_policy='evict_last', other=0.0)
tmp57 = tmp22 & tmp50
tmp58 = tl.load(in_ptr0 + (896 + 4 * (-12 + x1) + 16 * x2 + 1024 * x3 +
(-8 + x0)), tmp57, eviction_policy='evict_last', other=0.0)
tmp59 = tl.where(tmp19, tmp56, tmp58)
tmp60 = tl.where(tmp13, tmp54, tmp59)
tmp61 = tl.where(tmp7, tmp52, tmp60)
tmp62 = tl.full(tmp61.shape, 0.0, tmp61.dtype)
tmp63 = tl.where(tmp50, tmp61, tmp62)
tmp64 = tmp0 >= tmp17
tmp66 = tmp7 & tmp64
tmp67 = tl.load(in_ptr0 + (192 + 4 * x1 + 16 * x2 + 1024 * x3 + (-12 +
x0)), tmp66, eviction_policy='evict_last', other=0.0)
tmp68 = tmp13 & tmp64
tmp69 = tl.load(in_ptr0 + (448 + 4 * (-4 + x1) + 16 * x2 + 1024 * x3 +
(-12 + x0)), tmp68, eviction_policy='evict_last', other=0.0)
tmp70 = tmp19 & tmp64
tmp71 = tl.load(in_ptr0 + (704 + 4 * (-8 + x1) + 16 * x2 + 1024 * x3 +
(-12 + x0)), tmp70, eviction_policy='evict_last', other=0.0)
tmp72 = tmp22 & tmp64
tmp73 = tl.load(in_ptr0 + (960 + 4 * (-12 + x1) + 16 * x2 + 1024 * x3 +
(-12 + x0)), tmp72, eviction_policy='evict_last', other=0.0)
tmp74 = tl.where(tmp19, tmp71, tmp73)
tmp75 = tl.where(tmp13, tmp69, tmp74)
tmp76 = tl.where(tmp7, tmp67, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp64, tmp76, tmp77)
tmp79 = tl.where(tmp50, tmp63, tmp78)
tmp80 = tl.where(tmp34, tmp47, tmp79)
tmp81 = tl.where(tmp4, tmp31, tmp80)
tl.store(out_ptr0 + x4, tmp81, None)
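# Note: this one kernel fuses both chunk/cat passes of combine_tensor_patches:
# the nested tl.where selects route each output pixel (row x1, col x0) of the
# 16x16 canvas to grid patch (x1 // 4, x0 // 4) and pixel (x1 % 4, x0 % 4)
# within it, reading straight from the (B, 4, 4, C, 4, 4) patch layout.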
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 16, 16), (1024, 1024, 256, 16,
1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(4096)](arg0_1, buf0, 4096, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 16, 16), (1024, 256, 16, 1), 0),
def combine_tensor_patches(patches: 'torch.Tensor', window_size:
'Tuple[int, int]'=(4, 4), stride: 'Tuple[int, int]'=(4, 4), unpadding:
'Optional[Tuple[int, int, int, int]]'=None) ->torch.Tensor:
"""Restore input from patches.
Args:
patches: patched tensor.
window_size: the size of the sliding window and the output patch size.
stride: stride of the sliding window.
unpadding: remove the padding added to both sides of the input.
Shape:
- Input: :math:`(B, N, C, H_{out}, W_{out})`
- Output: :math:`(B, C, H, W)`
Example:
>>> out = extract_tensor_patches(torch.arange(16).view(1, 1, 4, 4), window_size=(2, 2), stride=(2, 2))
>>> combine_tensor_patches(out, window_size=(2, 2), stride=(2, 2))
tensor([[[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]]]])
"""
if stride[0] != window_size[0] or stride[1] != window_size[1]:
raise NotImplementedError(
f'Only stride == window_size is supported. Got {stride} and {window_size}. Please feel free to drop a PR to Kornia GitHub.'
)
if unpadding is not None:
window_size = window_size[0] + (unpadding[0] + unpadding[1]
) // window_size[0], window_size[1] + (unpadding[2] + unpadding[3]
) // window_size[1]
patches_tensor = patches.view(-1, window_size[0], window_size[1], *
patches.shape[-3:])
restored_tensor = torch.cat(torch.chunk(patches_tensor, window_size[0],
dim=1), -2).squeeze(1)
restored_tensor = torch.cat(torch.chunk(restored_tensor, window_size[1],
dim=1), -1).squeeze(1)
if unpadding is not None:
restored_tensor = torch.nn.functional.pad(restored_tensor, [(-i) for
i in unpadding])
return restored_tensor
class CombineTensorPatchesNew(nn.Module):
"""Module that combine patches from tensors.
In the simplest case, the output value of the operator with input size
:math:`(B, N, C, H_{out}, W_{out})` is :math:`(B, C, H, W)`.
where
- :math:`B` is the batch size.
- :math:`N` denotes the total number of extracted patches stacked in
  left-right and top-bottom order.
- :math:`C` denotes the number of input channels.
- :math:`H`, :math:`W` denote the input height and width in pixels.
- :math:`H_{out}`, :math:`W_{out}` denote the patch size
  defined in the function signature.
* :attr:`window_size` is the size of the sliding window and controls the
shape of the output tensor and defines the shape of the output patch.
* :attr:`stride` controls the stride to apply to the sliding window and
regulates the overlapping between the extracted patches.
    * :attr:`padding` controls the amount of implicit zero-padding on both
      sides at each dimension.
The parameters :attr:`window_size`, :attr:`stride` and :attr:`padding` can
be either:
- a single ``int`` -- in which case the same value is used for the
height and width dimension.
- a ``tuple`` of two ints -- in which case, the first `int` is used for
the height dimension, and the second `int` for the width dimension.
Args:
patches: patched tensor.
window_size: the size of the sliding window and the output patch size.
        unpadding: remove the padding added to both sides of the input.
Shape:
- Input: :math:`(B, N, C, H_{out}, W_{out})`
- Output: :math:`(B, C, H, W)`
Example:
>>> out = extract_tensor_patches(torch.arange(16).view(1, 1, 4, 4), window_size=(2, 2), stride=(2, 2))
>>> combine_tensor_patches(out, window_size=(2, 2), stride=(2, 2))
tensor([[[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]]]])
"""
def __init__(self, window_size: 'Union[int, Tuple[int, int]]',
unpadding: 'Union[int, Tuple[int, int]]'=0) ->None:
super().__init__()
self.window_size: 'Tuple[int, int]' = _pair(window_size)
pad: 'Tuple[int, int]' = _pair(unpadding)
self.unpadding: 'Tuple[int, int, int, int]' = (pad[0], pad[0], pad[
1], pad[1])
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
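# Hedged smoke test (reader addition, not part of the original repo): the
# guard in `call` expects one contiguous (4, 4, 4, 4, 4, 4) float32 tensor and
# the kernel writes a (4, 4, 16, 16) view, so any random CUDA tensor of that
# shape exercises the fused path end to end.
def _combine_tensor_patches_smoke_test():
    if not torch.cuda.is_available():
        return
    module = CombineTensorPatchesNew(window_size=4)
    patches = torch.rand(4, 4, 4, 4, 4, 4, device='cuda')
    out = module(patches)
    assert out.shape == (4, 4, 16, 16)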
| shiyangc-intusurg/kornia | CombineTensorPatches | false | 16,431 | [
"ECL-2.0",
"Apache-2.0"
] | 4,894 | 2e2512f8f20d300d8732e5873e16336b5a01f3bd | https://github.com/shiyangc-intusurg/kornia/tree/2e2512f8f20d300d8732e5873e16336b5a01f3bd |
KLDivergence | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wq/cwq5ar7x4w72rft7s3gjlikyil3h3bo3kpq25ppgk4lkuasnqkaz.py
# Topologically Sorted Source Nodes: [log, mul, log_softmax, mul_1, sub, loss, mean], Original ATen: [aten.log, aten.mul, aten._log_softmax, aten.sub, aten.sum, aten.mean]
# Source node to ATen node mapping:
# log => log
# log_softmax => exp, log_1, sub_1, sum_1
# loss => sum_2
# mean => mean
# mul => mul
# mul_1 => mul_1
# sub => sub_2
# Graph fragment:
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %log), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sub_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_2, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {})
triton_per_fused__log_softmax_log_mean_mul_sub_sum_1 = async_compile.triton('triton_per_fused__log_softmax_log_mean_mul_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_log_mean_mul_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_log_mean_mul_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp3 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp8 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp11 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp18 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp25 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp32 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp1 = tl_math.log(tmp0)
tmp2 = tmp0 * tmp1
tmp4 = tl_math.exp(tmp3)
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp14 = tl_math.log(tmp13)
tmp15 = tmp3 - tmp14
tmp16 = tmp0 * tmp15
tmp17 = tmp2 - tmp16
tmp19 = tl_math.log(tmp18)
tmp20 = tmp18 * tmp19
tmp21 = tmp5 - tmp14
tmp22 = tmp18 * tmp21
tmp23 = tmp20 - tmp22
tmp24 = tmp17 + tmp23
tmp26 = tl_math.log(tmp25)
tmp27 = tmp25 * tmp26
tmp28 = tmp8 - tmp14
tmp29 = tmp25 * tmp28
tmp30 = tmp27 - tmp29
tmp31 = tmp24 + tmp30
tmp33 = tl_math.log(tmp32)
tmp34 = tmp32 * tmp33
tmp35 = tmp11 - tmp14
tmp36 = tmp32 * tmp35
tmp37 = tmp34 - tmp36
tmp38 = tmp31 + tmp37
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp43, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [log, mul, log_softmax, mul_1, sub, loss, mean], Original ATen: [aten.log, aten.mul, aten._log_softmax, aten.sub, aten.sum, aten.mean]
triton_per_fused__log_softmax_log_mean_mul_sub_sum_1.run(buf3, arg0_1, buf0, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim
def kl_divergence(y, target, mask=None, reduce=True):
loss = (target * torch.log(target) - target * F.log_softmax(y, 1)).sum(1)
if mask is not None:
loss = mask * loss
if reduce:
return loss.mean()
else:
return loss
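# Hedged sanity check (added for the reader, not part of the original source):
# for strictly positive `target` distributions, the hand-rolled loss above is
# pointwise identical to torch's built-in KL divergence, i.e.
# F.kl_div(F.log_softmax(y, 1), target, reduction='none').sum(1).mean().
def _kl_divergence_sanity_check():
    y = torch.randn(2, 5, 3, 3)
    target = torch.softmax(torch.randn(2, 5, 3, 3), dim=1)
    ours = kl_divergence(y, target)
    ref = F.kl_div(F.log_softmax(y, 1), target, reduction='none').sum(1).mean()
    assert torch.allclose(ours, ref, atol=1e-5)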
class KLDivergence(nn.Module):
def __init__(self, reduce):
super().__init__()
self.reduce = reduce
def forward(self, y, target, mask=None, *args, **kwargs):
return kl_divergence(y, target.detach(), mask, self.reduce)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'reduce': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
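# Reader note: this stripped kernel mirrors the annotated version above -- it
# subtracts the per-position maximum over the four dim-1 slices of the input,
# i.e. the numerically stable shift used by log-softmax.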
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
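# Reader note: this persistent reduction fuses target * log(target) minus
# target * log_softmax(y) across the four dim-1 slices, sums over that dim,
# and averages the remaining 64 elements into a single scalar.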
@triton.jit
def triton_per_fused__log_softmax_log_mean_mul_sub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp3 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp11 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp18 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp25 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp32 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp1 = tl_math.log(tmp0)
tmp2 = tmp0 * tmp1
tmp4 = tl_math.exp(tmp3)
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp14 = tl_math.log(tmp13)
tmp15 = tmp3 - tmp14
tmp16 = tmp0 * tmp15
tmp17 = tmp2 - tmp16
tmp19 = tl_math.log(tmp18)
tmp20 = tmp18 * tmp19
tmp21 = tmp5 - tmp14
tmp22 = tmp18 * tmp21
tmp23 = tmp20 - tmp22
tmp24 = tmp17 + tmp23
tmp26 = tl_math.log(tmp25)
tmp27 = tmp25 * tmp26
tmp28 = tmp8 - tmp14
tmp29 = tmp25 * tmp28
tmp30 = tmp27 - tmp29
tmp31 = tmp24 + tmp30
tmp33 = tl_math.log(tmp32)
tmp34 = tmp32 * tmp33
tmp35 = tmp11 - tmp14
tmp36 = tmp32 * tmp35
tmp37 = tmp34 - tmp36
tmp38 = tmp31 + tmp37
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_log_mean_mul_sub_sum_1[grid(1)](buf3,
arg0_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf3,
def kl_divergence(y, target, mask=None, reduce=True):
loss = (target * torch.log(target) - target * F.log_softmax(y, 1)).sum(1)
if mask is not None:
loss = mask * loss
if reduce:
return loss.mean()
else:
return loss
class KLDivergenceNew(nn.Module):
def __init__(self, reduce):
super().__init__()
self.reduce = reduce
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
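# Hedged smoke test (assumes a CUDA device; `call` guards two contiguous
# (4, 4, 4, 4) float32 tensors). Judging from the kernels above, arg0_1 feeds
# the target * log(target) branch and arg1_1 the log-softmax branch.
def _kl_divergence_new_smoke_test():
    if not torch.cuda.is_available():
        return
    target = torch.softmax(torch.rand(4, 4, 4, 4, device='cuda'), dim=1)
    y = torch.rand(4, 4, 4, 4, device='cuda')
    fused = KLDivergenceNew(reduce=True)(target, y)
    eager = kl_divergence(y, target)
    assert torch.allclose(fused, eager, atol=1e-4)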
| shrutimoy10/cords | KLDivergence | false | 16,432 | [
"MIT"
] | 185 | 8f8d087098afafd352f793821911d80eb7b39a7d | https://github.com/shrutimoy10/cords/tree/8f8d087098afafd352f793821911d80eb7b39a7d |
JointsMSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/6b/c6ba66mnjaoaqcyan4gntekhevsjcuebgcvasyx5yi5vnfp42otw.py
# Topologically Sorted Source Nodes: [mse_loss, mul, loss, mse_loss_1, mul_1, loss_1, mse_loss_2, mul_2, loss_2, mse_loss_3, mul_3, loss_3, truediv], Original ATen: [aten.mse_loss, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# loss => add
# loss_1 => add_1
# loss_2 => add_2
# loss_3 => add_3
# mse_loss => mean, pow_1, sub
# mse_loss_1 => mean_1, pow_2, sub_1
# mse_loss_2 => mean_2, pow_3, sub_2
# mse_loss_3 => mean_3, pow_4, sub_3
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# truediv => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %squeeze_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_2, %squeeze_3), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_4, %squeeze_5), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_2, 0.5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_6, %squeeze_7), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_4,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_3, 0.5), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_3), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_3, 4), kwargs = {})
triton_per_fused_add_div_mse_loss_mul_0 = async_compile.triton('triton_per_fused_add_div_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp7 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp8 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp14 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp15 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp21 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp22 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.sum(tmp18, 1)[:, None]
tmp23 = tmp21 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp28 = 64.0
tmp29 = tmp6 / tmp28
tmp30 = 0.5
tmp31 = tmp29 * tmp30
tmp32 = 0.0
tmp33 = tmp31 + tmp32
tmp34 = tmp13 / tmp28
tmp35 = tmp34 * tmp30
tmp36 = tmp33 + tmp35
tmp37 = tmp20 / tmp28
tmp38 = tmp37 * tmp30
tmp39 = tmp36 + tmp38
tmp40 = tmp27 / tmp28
tmp41 = tmp40 * tmp30
tmp42 = tmp39 + tmp41
tmp43 = 0.25
tmp44 = tmp42 * tmp43
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp44, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mse_loss, mul, loss, mse_loss_1, mul_1, loss_1, mse_loss_2, mul_2, loss_2, mse_loss_3, mul_3, loss_3, truediv], Original ATen: [aten.mse_loss, aten.mul, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0.run(buf4, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class JointsMSELoss(nn.Module):
def __init__(self):
super(JointsMSELoss, self).__init__()
self.criterion = nn.MSELoss(reduction='mean')
def forward(self, output, target, target_weight=None):
batch_size = output.size(0)
num_keypoints = output.size(1)
heatmaps_pred = output.reshape((batch_size, num_keypoints, -1)).split(
1, 1)
heatmaps_gt = target.reshape((batch_size, num_keypoints, -1)).split(
1, 1)
loss = 0
for idx in range(num_keypoints):
heatmap_pred = heatmaps_pred[idx].squeeze()
heatmap_gt = heatmaps_gt[idx].squeeze()
if target_weight is not None:
loss += 0.5 * self.criterion(heatmap_pred.mul(target_weight
[:, idx]), heatmap_gt.mul(target_weight[:, idx]))
else:
loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
return loss / num_keypoints
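# Hedged usage sketch (shapes are illustrative, not from the original source):
# `target_weight` is expected as (batch, num_keypoints, 1) so that
# target_weight[:, idx] broadcasts against the flattened (batch, H*W) heatmaps.
def _joints_mse_usage_example():
    criterion = JointsMSELoss()
    pred = torch.rand(2, 17, 64, 64)
    gt = torch.rand(2, 17, 64, 64)
    vis = torch.ones(2, 17, 1)  # 1 = keypoint visible, 0 = ignore
    return criterion(pred, gt, target_weight=vis)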
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
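# Reader note: a single persistent reduction computes all four per-keypoint
# squared-error sums, scales each by 0.5 / 64 (the MSE mean times the 0.5
# factor), and averages them via the final * 0.25, matching the eager loop.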
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp7 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp21 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp22 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.sum(tmp18, 1)[:, None]
tmp23 = tmp21 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp28 = 64.0
tmp29 = tmp6 / tmp28
tmp30 = 0.5
tmp31 = tmp29 * tmp30
tmp32 = 0.0
tmp33 = tmp31 + tmp32
tmp34 = tmp13 / tmp28
tmp35 = tmp34 * tmp30
tmp36 = tmp33 + tmp35
tmp37 = tmp20 / tmp28
tmp38 = tmp37 * tmp30
tmp39 = tmp36 + tmp38
tmp40 = tmp27 / tmp28
tmp41 = tmp40 * tmp30
tmp42 = tmp39 + tmp41
tmp43 = 0.25
tmp44 = tmp42 * tmp43
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp44, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf4, arg0_1,
arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf4,
class JointsMSELossNew(nn.Module):
def __init__(self):
super(JointsMSELossNew, self).__init__()
self.criterion = nn.MSELoss(reduction='mean')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
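# Hedged smoke test (assumes a CUDA device). The fused kernel only covers the
# unweighted path; with the (4, 4, 4, 4) shapes guarded in `call`, the loss
# reduces to 0.5 * mean((a - b) ** 2), used as an eager reference below.
def _joints_mse_new_smoke_test():
    if not torch.cuda.is_available():
        return
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    fused = JointsMSELossNew()(a, b)
    ref = 0.5 * ((a - b) ** 2).mean()
    assert torch.allclose(fused, ref, atol=1e-5)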
| shunya-toyokawa/qanet_human_parts_segmentatiom | JointsMSELoss | false | 16,433 | [
"MIT"
] | 72 | 5527b247acd65534b455c26e3692a14b31669602 | https://github.com/shunya-toyokawa/qanet_human_parts_segmentatiom/tree/5527b247acd65534b455c26e3692a14b31669602 |
BoundedIoULoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/7x/c7xwrwphgpxaqzcdkusz7k3bdfxknc3itqkz2qte5dmrgecbr7ie.py
# Topologically Sorted Source Nodes: [cat, loss, lt, pow_1, mul, truediv_3, sub_5, loss_1, sum_1], Original ATen: [aten.cat, aten.rsub, aten.lt, aten.pow, aten.mul, aten.div, aten.sub, aten.where, aten.sum]
# Source node to ATen node mapping:
# cat => cat
# loss => sub_4
# loss_1 => where
# lt => lt
# mul => mul
# pow_1 => pow_1
# sub_5 => sub_5
# sum_1 => sum_1
# truediv_3 => div_3
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%clamp_min, %minimum], -1), kwargs = {})
# %sub_4 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %cat), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%sub_4, 0.2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_4, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 0.2), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_4, 0.1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div_3, %sub_5), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where,), kwargs = {})
triton_per_fused_cat_div_lt_mul_pow_rsub_sub_sum_where_0 = async_compile.triton('triton_per_fused_cat_div_lt_mul_pow_rsub_sub_sum_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_div_lt_mul_pow_rsub_sub_sum_where_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cat_div_lt_mul_pow_rsub_sub_sum_where_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 8
r1 = (rindex // 8) % 8
r2 = (rindex // 64)
r3 = rindex
tmp0 = r0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (tl.broadcast_to(32 + (4*r1) + (64*r2) + r0, [RBLOCK])), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (tl.broadcast_to((4*r1) + (64*r2) + r0, [RBLOCK])), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tmp6 + tmp5
tmp9 = tl.load(in_ptr1 + (tl.broadcast_to((4*r1) + (64*r2) + r0, [RBLOCK])), tmp4, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (tl.broadcast_to(32 + (4*r1) + (64*r2) + r0, [RBLOCK])), tmp4, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp8 - tmp11
tmp13 = tl_math.abs(tmp12)
tmp14 = tmp7 - tmp13
tmp15 = tmp7 + tmp13
tmp16 = 0.001
tmp17 = tmp15 + tmp16
tmp18 = tmp14 / tmp17
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp4, tmp20, tmp21)
tmp23 = tmp0 >= tmp3
tmp24 = tl.full([1], 8, tl.int64)
tmp25 = tmp0 < tmp24
tmp26 = tl.load(in_ptr0 + (tl.broadcast_to(32 + (4*r1) + (64*r2) + ((-4) + r0), [RBLOCK])), tmp23, eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr0 + (tl.broadcast_to((4*r1) + (64*r2) + ((-4) + r0), [RBLOCK])), tmp23, eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 - tmp27
tmp29 = tl.load(in_ptr1 + (tl.broadcast_to(32 + (4*r1) + (64*r2) + ((-4) + r0), [RBLOCK])), tmp23, eviction_policy='evict_last', other=0.0)
tmp30 = tl.load(in_ptr1 + (tl.broadcast_to((4*r1) + (64*r2) + ((-4) + r0), [RBLOCK])), tmp23, eviction_policy='evict_last', other=0.0)
tmp31 = tmp29 - tmp30
tmp32 = tmp31 + tmp16
tmp33 = tmp28 / tmp32
tmp34 = tmp28 + tmp16
tmp35 = tmp31 / tmp34
tmp36 = triton_helpers.minimum(tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp23, tmp36, tmp37)
tmp39 = tl.where(tmp4, tmp22, tmp38)
tmp40 = 1.0
tmp41 = tmp40 - tmp39
tmp42 = 0.2
tmp43 = tmp41 < tmp42
tmp44 = tmp41 * tmp41
tmp45 = 0.5
tmp46 = tmp44 * tmp45
tmp47 = 5.0
tmp48 = tmp46 * tmp47
tmp49 = 0.1
tmp50 = tmp41 - tmp49
tmp51 = tl.where(tmp43, tmp48, tmp50)
tmp52 = tl.broadcast_to(tmp51, [RBLOCK])
tmp54 = triton_helpers.promote_to_tensor(tl.sum(tmp52, 0))
tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp54, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [cat, loss, lt, pow_1, mul, truediv_3, sub_5, loss_1, sum_1], Original ATen: [aten.cat, aten.rsub, aten.lt, aten.pow, aten.mul, aten.div, aten.sub, aten.where, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_cat_div_lt_mul_pow_rsub_sub_sum_where_0.run(arg1_1, arg0_1, buf1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class BoundedIoULoss(nn.Module):
def __init__(self, beta=0.2, eps=0.001):
super(BoundedIoULoss, self).__init__()
self.beta = beta
self.eps = eps
def forward(self, pred, target, weight=None):
pred_ctr_2x = pred[:, :2] + pred[:, 2:]
pred_wh = pred[:, 2:] - pred[:, :2]
with torch.no_grad():
target_ctr_2x = target[:, :2] + target[:, 2:]
target_wh = target[:, 2:] - target[:, :2]
d_xy_2x = (target_ctr_2x - pred_ctr_2x).abs()
loss_xy = torch.clamp((target_wh - d_xy_2x) / (target_wh + d_xy_2x +
self.eps), min=0)
loss_wh = torch.min(target_wh / (pred_wh + self.eps), pred_wh / (
target_wh + self.eps))
loss = 1 - torch.cat([loss_xy, loss_wh], dim=-1)
if self.beta >= 1e-05:
loss = torch.where(loss < self.beta, 0.5 * loss ** 2 / self.
beta, loss - 0.5 * self.beta)
if weight is not None:
loss = loss * weight
return loss.sum()
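# Hedged worked example (reader addition): for identical boxes every IoU proxy
# approaches 1, so `1 - cat(...)` is roughly eps-sized and the quadratic
# branch squashes it further -- the summed loss lands near zero.
def _bounded_iou_smoke_test():
    criterion = BoundedIoULoss()
    boxes = torch.tensor([[0.0, 0.0, 2.0, 3.0]])  # (x1, y1, x2, y2)
    assert criterion(boxes, boxes).item() < 1e-2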
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
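# Reader note: the single fused kernel materializes cat([loss_xy, loss_wh], -1)
# on the fly (r0 < 4 selects the clamped xy branch, r0 >= 4 the min-ratio wh
# branch), applies the smooth transform with 1 / beta folded in as * 5.0, and
# sum-reduces all 256 elements into a scalar.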
@triton.jit
def triton_per_fused_cat_div_lt_mul_pow_rsub_sub_sum_where_0(in_ptr0,
in_ptr1, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 8
r1 = rindex // 8 % 8
r2 = rindex // 64
tmp0 = r0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + tl.broadcast_to(32 + 4 * r1 + 64 * r2 + r0, [
RBLOCK]), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + tl.broadcast_to(4 * r1 + 64 * r2 + r0, [RBLOCK
]), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tmp6 + tmp5
tmp9 = tl.load(in_ptr1 + tl.broadcast_to(4 * r1 + 64 * r2 + r0, [RBLOCK
]), tmp4, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + tl.broadcast_to(32 + 4 * r1 + 64 * r2 + r0, [
RBLOCK]), tmp4, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp8 - tmp11
tmp13 = tl_math.abs(tmp12)
tmp14 = tmp7 - tmp13
tmp15 = tmp7 + tmp13
tmp16 = 0.001
tmp17 = tmp15 + tmp16
tmp18 = tmp14 / tmp17
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp4, tmp20, tmp21)
tmp23 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp26 = tl.load(in_ptr0 + tl.broadcast_to(32 + 4 * r1 + 64 * r2 + (-4 +
r0), [RBLOCK]), tmp23, eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr0 + tl.broadcast_to(4 * r1 + 64 * r2 + (-4 + r0),
[RBLOCK]), tmp23, eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 - tmp27
tmp29 = tl.load(in_ptr1 + tl.broadcast_to(32 + 4 * r1 + 64 * r2 + (-4 +
r0), [RBLOCK]), tmp23, eviction_policy='evict_last', other=0.0)
tmp30 = tl.load(in_ptr1 + tl.broadcast_to(4 * r1 + 64 * r2 + (-4 + r0),
[RBLOCK]), tmp23, eviction_policy='evict_last', other=0.0)
tmp31 = tmp29 - tmp30
tmp32 = tmp31 + tmp16
tmp33 = tmp28 / tmp32
tmp34 = tmp28 + tmp16
tmp35 = tmp31 / tmp34
tmp36 = triton_helpers.minimum(tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp23, tmp36, tmp37)
tmp39 = tl.where(tmp4, tmp22, tmp38)
tmp40 = 1.0
tmp41 = tmp40 - tmp39
tmp42 = 0.2
tmp43 = tmp41 < tmp42
tmp44 = tmp41 * tmp41
tmp45 = 0.5
tmp46 = tmp44 * tmp45
tmp47 = 5.0
tmp48 = tmp46 * tmp47
tmp49 = 0.1
tmp50 = tmp41 - tmp49
tmp51 = tl.where(tmp43, tmp48, tmp50)
tmp52 = tl.broadcast_to(tmp51, [RBLOCK])
tmp54 = triton_helpers.promote_to_tensor(tl.sum(tmp52, 0))
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp54, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_cat_div_lt_mul_pow_rsub_sub_sum_where_0[grid(1)](
arg1_1, arg0_1, buf1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BoundedIoULossNew(nn.Module):
def __init__(self, beta=0.2, eps=0.001):
super(BoundedIoULossNew, self).__init__()
self.beta = beta
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
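# Hedged smoke test (assumes a CUDA device; `call` guards two contiguous
# (4, 4, 4, 4) float32 tensors, with the [:, :2] / [:, 2:] box split baked
# into the kernel's index arithmetic).
def _bounded_iou_new_smoke_test():
    if not torch.cuda.is_available():
        return
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    loss = BoundedIoULossNew()(pred, target)
    assert loss.dim() == 0  # sum-reduced scalar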
| shunya-toyokawa/qanet_human_parts_segmentatiom | BoundedIoULoss | false | 16,434 | [
"MIT"
] | 72 | 5527b247acd65534b455c26e3692a14b31669602 | https://github.com/shunya-toyokawa/qanet_human_parts_segmentatiom/tree/5527b247acd65534b455c26e3692a14b31669602 |
HyperpriorAnalysis | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ri/criqkuezky4rfayw6ltqhgahwfjt2yn2oze6l7t3fkgbcyinvotk.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 70400
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 220
y1 = (yindex // 220)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (220*x2) + (1980*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wh/cwhkswckmxft7monsnauuprtatvuatkc4bi2kgvlifmba3jwe76j.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 880
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 220
y1 = (yindex // 220)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (220*x2) + (901120*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pa/cpapdgqqws5ag5ehwpofrqtvw5xphxp2fs56js4yos3j74haw2xm.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 102400
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = (yindex // 320)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (320*x2) + (8000*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ph/cphj34isihiy7vcgo2uyriwgge7pmnu2sij75kxq7ggby6hlmwvh.py
# Topologically Sorted Source Nodes: [conv2d, x, pad], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# conv2d => convolution
# pad => _unsafe_index, _unsafe_index_1
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_convolution_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 5918720
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 320
x1 = (xindex // 320) % 68
x2 = (xindex // 21760) % 68
x3 = (xindex // 1479680)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (1310400 + x0 + ((-20480)*(tl_math.abs((-63) + (tl_math.abs((-2) + x2))))) + ((-320)*(tl_math.abs((-63) + (tl_math.abs((-2) + x1))))) + (1310720*x3)), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/v4/cv47rfntyy3kzrek5u7vimlqya432tgzptvdqdmafj6vdacriaky.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1, pad_1], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# pad_1 => _unsafe_index_2, _unsafe_index_3
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %sub_5, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_5]), kwargs = {})
triton_poi_fused_convolution_reflection_pad2d_relu_4 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1658880
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 320
x1 = (xindex // 320) % 36
x2 = (xindex // 11520) % 36
x3 = (xindex // 414720)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (327360 + x0 + ((-10240)*(tl_math.abs((-31) + (tl_math.abs((-2) + x2))))) + ((-320)*(tl_math.abs((-31) + (tl_math.abs((-2) + x1))))) + (327680*x3)), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yi/cyipfmkmpblqs4aii7e57iy25xgn5wjt4gc2pyq36tdwjza3ab4v.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1280
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 320
y1 = (yindex // 320)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (320*x2) + (81920*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (256*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5n/c5nmtj336g7l5l66tr54hrh23mpif2c4y6mvrwfj7oymjbig3oot.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_18 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1310720
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pd/cpdnddlltf377q3jfxsohyo3jb3e2v5oczjlv7ymcx5pggaqhm34.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le_37 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 5242880
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (320, 220, 3, 3), (1980, 9, 3, 1))
assert_size_stride(primals_2, (320, ), (1, ))
assert_size_stride(primals_3, (4, 220, 64, 64), (901120, 4096, 64, 1))
assert_size_stride(primals_4, (320, 320, 5, 5), (8000, 25, 5, 1))
assert_size_stride(primals_5, (320, ), (1, ))
assert_size_stride(primals_6, (320, 320, 5, 5), (8000, 25, 5, 1))
assert_size_stride(primals_7, (320, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((320, 220, 3, 3), (1980, 1, 660, 220), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 70400, 9, grid=grid(70400, 9), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 220, 64, 64), (901120, 1, 14080, 220), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 880, 4096, grid=grid(880, 4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((320, 320, 5, 5), (8000, 1, 1600, 320), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 102400, 25, grid=grid(102400, 25), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((320, 320, 5, 5), (8000, 1, 1600, 320), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_6, buf3, 102400, 25, grid=grid(102400, 25), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 320, 64, 64), (1310720, 1, 20480, 320))
buf5 = empty_strided_cuda((4, 320, 68, 68), (1479680, 1, 21760, 320), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x, pad], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf4, primals_2, buf5, 5918720, grid=grid(5918720), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 320, 32, 32), (327680, 1, 10240, 320))
buf7 = empty_strided_cuda((4, 320, 36, 36), (414720, 1, 11520, 320), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_1, pad_1], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_4.run(buf6, primals_5, buf7, 1658880, grid=grid(1658880), stream=stream0)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 320, 16, 16), (81920, 1, 5120, 320))
buf9 = empty_strided_cuda((4, 320, 16, 16), (81920, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf8, primals_7, buf9, 1280, 256, grid=grid(1280, 256), stream=stream0)
del buf8
del primals_7
buf10 = empty_strided_cuda((4, 320, 32, 32), (327680, 1, 10240, 320), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_6.run(buf6, primals_5, buf10, 1310720, grid=grid(1310720), stream=stream0)
del buf6
del primals_5
buf11 = empty_strided_cuda((4, 320, 64, 64), (1310720, 1, 20480, 320), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_7.run(buf4, primals_2, buf11, 5242880, grid=grid(5242880), stream=stream0)
del buf4
del primals_2
return (buf9, buf0, buf1, buf2, buf3, buf5, buf7, buf10, buf11, )
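    # Note (my annotation, not emitted by Inductor): call() returns the forward
    # output first (buf9) followed by tensors saved for the backward pass -- the
    # channels-last weight/input copies (buf0-buf3), the reflection-padded
    # activations (buf5, buf7) and the two ReLU masks (buf10, buf11) produced by
    # the threshold_backward kernels above.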
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((320, 220, 3, 3), (1980, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((320, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 220, 64, 64), (901120, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((320, 320, 5, 5), (8000, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((320, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((320, 320, 5, 5), (8000, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((320, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class HyperpriorAnalysis(nn.Module):
"""
Hyperprior 'analysis model' as proposed in [1].
    [1] Ballé et al., "Variational image compression with a scale hyperprior",
arXiv:1802.01436 (2018).
C: Number of input channels
"""
def __init__(self, C=220, N=320, activation='relu'):
super(HyperpriorAnalysis, self).__init__()
cnn_kwargs = dict(kernel_size=5, stride=2, padding=2, padding_mode=
'reflect')
self.activation = getattr(F, activation)
self.n_downsampling_layers = 2
self.conv1 = nn.Conv2d(C, N, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(N, N, **cnn_kwargs)
self.conv3 = nn.Conv2d(N, N, **cnn_kwargs)
def forward(self, x):
x = self.activation(self.conv1(x))
x = self.activation(self.conv2(x))
x = self.conv3(x)
return x
def get_inputs():
return [torch.rand([4, 220, 64, 64])]
def get_init_inputs():
return [[], {}]
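# Illustrative sketch (my addition, not part of the source repo): conv1 keeps
# the 64x64 resolution, while conv2 and conv3 (kernel 5, stride 2, reflect
# padding 2) each halve it, so the latent is 16x16 with N=320 channels.
_m = HyperpriorAnalysis()
_y = _m(torch.rand(4, 220, 64, 64))
assert _y.shape == (4, 320, 16, 16)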
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 70400
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 220
y1 = yindex // 220
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 220 * x2 + 1980 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 880
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 220
y1 = yindex // 220
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 220 * x2 + 901120 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = yindex // 320
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 320 * x2 + 8000 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fuses bias-add, ReLU and a 2-pixel reflection pad (64x64 -> 68x68): the
    # nested tl_math.abs() terms reflect out-of-range rows and columns back
    # into [0, 63], while the negative strides and the 1310400 base offset
    # undo the resulting index flip.
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 320
x1 = xindex // 320 % 68
x2 = xindex // 21760 % 68
x3 = xindex // 1479680
x4 = xindex
tmp0 = tl.load(in_ptr0 + (1310400 + x0 + -20480 * tl_math.abs(-63 +
tl_math.abs(-2 + x2)) + -320 * tl_math.abs(-63 + tl_math.abs(-2 +
x1)) + 1310720 * x3), None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x4, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_4(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 320
x1 = xindex // 320 % 36
x2 = xindex // 11520 % 36
x3 = xindex // 414720
x4 = xindex
tmp0 = tl.load(in_ptr0 + (327360 + x0 + -10240 * tl_math.abs(-31 +
tl_math.abs(-2 + x2)) + -320 * tl_math.abs(-31 + tl_math.abs(-2 +
x1)) + 327680 * x3), None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x4, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 1280
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 320
y1 = yindex // 320
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 320 * x2 + 81920 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 256 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Recomputes the bias-add + ReLU and stores the boolean mask
    # (activation <= 0) that aten.threshold_backward consumes in the ReLU
    # backward pass; kernel _7 below does the same for the first conv.
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (320, 220, 3, 3), (1980, 9, 3, 1))
assert_size_stride(primals_2, (320,), (1,))
assert_size_stride(primals_3, (4, 220, 64, 64), (901120, 4096, 64, 1))
assert_size_stride(primals_4, (320, 320, 5, 5), (8000, 25, 5, 1))
assert_size_stride(primals_5, (320,), (1,))
assert_size_stride(primals_6, (320, 320, 5, 5), (8000, 25, 5, 1))
assert_size_stride(primals_7, (320,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((320, 220, 3, 3), (1980, 1, 660, 220),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(70400, 9)](primals_1, buf0, 70400, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 220, 64, 64), (901120, 1, 14080, 220),
torch.float32)
triton_poi_fused_1[grid(880, 4096)](primals_3, buf1, 880, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((320, 320, 5, 5), (8000, 1, 1600, 320),
torch.float32)
triton_poi_fused_2[grid(102400, 25)](primals_4, buf2, 102400, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((320, 320, 5, 5), (8000, 1, 1600, 320),
torch.float32)
triton_poi_fused_2[grid(102400, 25)](primals_6, buf3, 102400, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 320, 64, 64), (1310720, 1, 20480, 320))
buf5 = empty_strided_cuda((4, 320, 68, 68), (1479680, 1, 21760, 320
), torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(5918720)](
buf4, primals_2, buf5, 5918720, XBLOCK=512, num_warps=8,
num_stages=1)
buf6 = extern_kernels.convolution(buf5, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 320, 32, 32), (327680, 1, 10240, 320))
buf7 = empty_strided_cuda((4, 320, 36, 36), (414720, 1, 11520, 320),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_4[grid(1658880)](
buf6, primals_5, buf7, 1658880, XBLOCK=1024, num_warps=4,
num_stages=1)
buf8 = extern_kernels.convolution(buf7, buf3, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 320, 16, 16), (81920, 1, 5120, 320))
buf9 = empty_strided_cuda((4, 320, 16, 16), (81920, 256, 16, 1),
torch.float32)
triton_poi_fused_convolution_5[grid(1280, 256)](buf8, primals_7,
buf9, 1280, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf8
del primals_7
buf10 = empty_strided_cuda((4, 320, 32, 32), (327680, 1, 10240, 320
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(1310720)](
buf6, primals_5, buf10, 1310720, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf6
del primals_5
buf11 = empty_strided_cuda((4, 320, 64, 64), (1310720, 1, 20480,
320), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(5242880)](
buf4, primals_2, buf11, 5242880, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf4
del primals_2
return buf9, buf0, buf1, buf2, buf3, buf5, buf7, buf10, buf11
class HyperpriorAnalysisNew(nn.Module):
"""
Hyperprior 'analysis model' as proposed in [1].
    [1] Ballé et al., "Variational image compression with a scale hyperprior",
arXiv:1802.01436 (2018).
C: Number of input channels
"""
def __init__(self, C=220, N=320, activation='relu'):
super(HyperpriorAnalysisNew, self).__init__()
cnn_kwargs = dict(kernel_size=5, stride=2, padding=2, padding_mode=
'reflect')
self.activation = getattr(F, activation)
self.n_downsampling_layers = 2
self.conv1 = nn.Conv2d(C, N, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(N, N, **cnn_kwargs)
self.conv3 = nn.Conv2d(N, N, **cnn_kwargs)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
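# Hedged equivalence sketch (my addition): with shared weights, the compiled
# call() path should reproduce the eager HyperpriorAnalysis forward on CUDA
# to within float32 tolerance.
if torch.cuda.is_available():
    _eager = HyperpriorAnalysis().cuda()
    _fast = HyperpriorAnalysisNew().cuda()
    _fast.load_state_dict(_eager.state_dict())
    _x = torch.rand(4, 220, 64, 64, device='cuda')
    torch.testing.assert_close(_fast(_x), _eager(_x), rtol=1e-4, atol=1e-4)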
 | sedrickkeh/high-fidelity-dual-image | HyperpriorAnalysis | false | 16,435 | ["Apache-2.0"] | 266 | 9cefd378467826b91596653df38666e469bb23e0 | https://github.com/sedrickkeh/high-fidelity-dual-image/tree/9cefd378467826b91596653df38666e469bb23e0 |
NormalizeScale | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/np/cnps5tn7p5oel2g4xflss7hji5wez7yd4am3swgdezysbdfcjyvl.py
# Topologically Sorted Source Nodes: [bottom_normalized, bottom_normalized_scaled], Original ATen: [aten.div, aten.mul]
# Source node to ATen node mapping:
# bottom_normalized => div
# bottom_normalized_scaled => mul
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {})
triton_poi_fused_div_mul_0 = async_compile.triton('triton_poi_fused_div_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = (xindex // 64)
x5 = xindex % 16
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + (x4), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bottom_normalized, bottom_normalized_scaled], Original ATen: [aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_div_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class NormalizeScale(nn.Module):
def __init__(self, dim, init_norm=20):
super(NormalizeScale, self).__init__()
self.init_norm = init_norm
self.weight = nn.Parameter(torch.ones(1, dim) * init_norm)
def forward(self, bottom):
bottom_normalized = F.normalize(bottom, p=2, dim=1)
bottom_normalized_scaled = bottom_normalized * self.weight
return bottom_normalized_scaled
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Fused F.normalize(x, p=2, dim=1) * weight: the four strided loads gather
    # the 4 channel values that share one spatial position, tmp12 is their L2
    # norm, and tmp13/tmp14 apply the eps=1e-12 clamp used by F.normalize.
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = xindex // 64
x5 = xindex % 16
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x4, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_0[grid(256)](primals_1, primals_2, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class NormalizeScaleNew(nn.Module):
def __init__(self, dim, init_norm=20):
super(NormalizeScaleNew, self).__init__()
self.init_norm = init_norm
self.weight = nn.Parameter(torch.ones(1, dim) * init_norm)
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
 | sibeiyang/sgmn | NormalizeScale | false | 16,436 | ["MIT"] | 130 | 00731b4f2202246d40a36d2a6727c599e6e649aa | https://github.com/sibeiyang/sgmn/tree/00731b4f2202246d40a36d2a6727c599e6e649aa |
PrimaryCapsLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/my/cmywhultrq2bo2glrc4pxf5zvi76gyyi6wzgrympkqvplmtgmnrp.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/q7/cq76sscp5jckpbmg3kidehbzbntesd4wwpimroumxaaoq32jzmpb.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# x => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_2), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 + tmp12
tmp14 = tmp11 / tmp13
tmp15 = libdevice.sqrt(tmp11)
tmp16 = tmp14 / tmp15
tmp17 = tmp0 * tmp16
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 1, 1), (16, 1, 1, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
return (buf2, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def squash(x):
lengths2 = x.pow(2).sum(dim=2)
lengths = lengths2.sqrt()
x = x * (lengths2 / (1 + lengths2) / lengths).view(x.size(0), x.size(1), 1)
return x
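# Property sketch (my addition): squash keeps each capsule's direction but
# rescales its length s to s^2 / (1 + s^2), which always lies in [0, 1).
_v = torch.randn(2, 3, 4)
assert (squash(_v).norm(dim=2) < 1).all()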
class PrimaryCapsLayer(nn.Module):
def __init__(self, input_channels, output_caps, output_dim, kernel_size,
stride):
super(PrimaryCapsLayer, self).__init__()
self.conv = nn.Conv2d(input_channels, output_caps * output_dim,
kernel_size=kernel_size, stride=stride)
self.input_channels = input_channels
self.output_caps = output_caps
self.output_dim = output_dim
def forward(self, input):
out = self.conv(input)
N, _C, H, W = out.size()
out = out.view(N, self.output_caps, self.output_dim, H, W)
out = out.permute(0, 1, 3, 4, 2).contiguous()
out = out.view(out.size(0), -1, out.size(4))
out = squash(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'output_caps': 4, 'output_dim': 4,
'kernel_size': 4, 'stride': 1}]
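# Shape sketch (my addition): with the init args above, the 4x4 kernel
# collapses the 4x4 input to a single spatial position, so the layer emits
# (N, output_caps * H * W, output_dim) = (4, 4, 4) capsule vectors.
_caps = PrimaryCapsLayer(4, 4, 4, kernel_size=4, stride=1)
assert _caps(torch.rand(4, 4, 4, 4)).shape == (4, 4, 4)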
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused squash(): every 4 contiguous values form one capsule vector,
    # tmp11 is its squared length, and the output is scaled by
    # (||v||^2 / (1 + ||v||^2)) / ||v||, matching squash() defined below.
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 + tmp12
tmp14 = tmp11 / tmp13
tmp15 = libdevice.sqrt(tmp11)
tmp16 = tmp14 / tmp15
tmp17 = tmp0 * tmp16
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 1, 1), (16, 1, 1, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf2, primals_1, primals_3, buf1
def squash(x):
lengths2 = x.pow(2).sum(dim=2)
lengths = lengths2.sqrt()
x = x * (lengths2 / (1 + lengths2) / lengths).view(x.size(0), x.size(1), 1)
return x
class PrimaryCapsLayerNew(nn.Module):
def __init__(self, input_channels, output_caps, output_dim, kernel_size,
stride):
super(PrimaryCapsLayerNew, self).__init__()
self.conv = nn.Conv2d(input_channels, output_caps * output_dim,
kernel_size=kernel_size, stride=stride)
self.input_channels = input_channels
self.output_caps = output_caps
self.output_dim = output_dim
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
 | shwetasrsh/MNIST-baselines | PrimaryCapsLayer | false | 16,437 | ["MIT"] | 61 | aa888e201a1dddda13e7b278cab8f940d57538db | https://github.com/shwetasrsh/MNIST-baselines/tree/aa888e201a1dddda13e7b278cab8f940d57538db |
NormAttnMap | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yg/cygpjynbou473tf4noi2cem3yrk3bayubj7lfcuqbdqshgaaj7wb.py
# Topologically Sorted Source Nodes: [max_1, setitem], Original ATen: [aten.max, aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# max_1 => max_1
# setitem => full_default, index_put
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, 1, True), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%getitem, [%le], %full_default), kwargs = {})
triton_poi_fused_index_put_lift_fresh_max_0 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = tmp6 <= tmp7
tmp9 = tl.where(tmp8, tmp7, tmp6)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mq/cmqzmtg5i7c3jiw6tatnxnuezvocfhdxvxtgr246t3g35eioloo2.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.div]
# Source node to ATen node mapping:
# attn => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %index_put), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1, setitem], Original ATen: [aten.max, aten.lift_fresh, aten.index_put]
stream0 = get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.div]
triton_poi_fused_div_1.run(arg0_1, buf0, buf1, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class NormAttnMap(nn.Module):
def __init__(self, norm_type='cossim'):
super(NormAttnMap, self).__init__()
self.norm_type = norm_type
def forward(self, attn_map):
        # Note: 'cosssim' (three s's) never equals the default 'cossim', so
        # the plain-max branch is always taken; the compiled kernel above
        # mirrors exactly that behavior.
        if self.norm_type != 'cosssim':
            norm = torch.max(attn_map, dim=1, keepdim=True)[0].detach()
        else:
            norm = torch.max(torch.abs(attn_map), dim=1, keepdim=True)[0].detach()
norm[norm <= 1] = 1
attn = attn_map / norm
return attn, norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
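# Usage sketch (my addition): the per-position channel max is clamped to at
# least 1 before dividing, so non-negative maps come out bounded by 1.
_attn, _norm = NormAttnMap()(torch.rand(4, 4, 4, 4))
assert (_attn <= 1).all() and (_norm >= 1).all()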
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_put_lift_fresh_max_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = tmp6 <= tmp7
tmp9 = tl.where(tmp8, tmp7, tmp6)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_max_0[grid(64)](arg0_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_div_1[grid(256)](arg0_1, buf0, buf1, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf1, buf0
class NormAttnMapNew(nn.Module):
def __init__(self, norm_type='cossim'):
super(NormAttnMapNew, self).__init__()
self.norm_type = norm_type
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
| sibeiyang/sgmn | NormAttnMap | false | 16,438 | [
"MIT"
] | 130 | 00731b4f2202246d40a36d2a6727c599e6e649aa | https://github.com/sibeiyang/sgmn/tree/00731b4f2202246d40a36d2a6727c599e6e649aa |
SwishX | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/7h/c7hojmlj4yycj63kwnqi2x7j5ce4vpbjhwzmyybmluzqgaeyt6sv.py
# Topologically Sorted Source Nodes: [sigmoid, output, sub, clamp, output_1], Original ATen: [aten.sigmoid, aten.mul, aten.sub, aten.clamp, aten.add, aten.le]
# Source node to ATen node mapping:
# clamp => clamp_max
# output => mul
# output_1 => add
# sigmoid => sigmoid
# sub => sub
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %primals_2), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%sub, 0.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_max, %primals_2), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%sub, 0.0), kwargs = {})
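# A minimal eager-mode sketch of what this fused kernel computes, using the
# names from the graph fragment above (x = primals_1, m = primals_2):
#   swish = x * torch.sigmoid(x)
#   out   = (swish - m).clamp(max=0.0) + m  # swish capped from above at m
#   mask  = (swish - m) <= 0.0              # boolean buffer kept for backward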
triton_poi_fused_add_clamp_le_mul_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_add_clamp_le_mul_sigmoid_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_le_mul_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_le_mul_sigmoid_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = 0.0
tmp7 = triton_helpers.minimum(tmp5, tmp6)
tmp8 = tmp7 + tmp4
tmp9 = tmp5 <= tmp6
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [sigmoid, output, sub, clamp, output_1], Original ATen: [aten.sigmoid, aten.mul, aten.sub, aten.clamp, aten.add, aten.le]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_le_mul_sigmoid_sub_0.run(primals_1, primals_2, buf0, buf1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class SwishX(nn.Module):
def __init__(self, maxvalue=2.72):
super(SwishX, self).__init__()
self.maximal = nn.Parameter(torch.FloatTensor([maxvalue]))
def forward(self, x):
output = x * torch.sigmoid(x)
output = output.sub(self.maximal).clamp(max=0.0).add(self.maximal)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
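# Usage sketch (assumed, mirroring get_inputs above):
#   m = SwishX(maxvalue=2.72)
#   y = m(torch.rand(4, 4, 4, 4))  # elementwise swish, clamped at 2.72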
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_le_mul_sigmoid_sub_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = 0.0
tmp7 = triton_helpers.minimum(tmp5, tmp6)
tmp8 = tmp7 + tmp4
tmp9 = tmp5 <= tmp6
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_clamp_le_mul_sigmoid_sub_0[grid(256)](primals_1,
primals_2, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0, buf1
class SwishXNew(nn.Module):
def __init__(self, maxvalue=2.72):
super(SwishXNew, self).__init__()
self.maximal = nn.Parameter(torch.FloatTensor([maxvalue]))
def forward(self, input_0):
primals_2 = self.maximal
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| shunya-toyokawa/qanet_human_parts_segmentatiom | SwishX | false | 16,439 | [
"MIT"
] | 72 | 5527b247acd65534b455c26e3692a14b31669602 | https://github.com/shunya-toyokawa/qanet_human_parts_segmentatiom/tree/5527b247acd65534b455c26e3692a14b31669602 |
Anomaly | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fv/cfvmfhncjkqx2v7rw6mb36tmxqnebq7nnu7nihptqk57t7kgg62s.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
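# In eager terms this kernel fuses the convolution's bias add with the ReLU,
# roughly x = torch.relu(conv_out + bias), applied in place on the
# convolution's output buffer; the relu_1/relu_2 kernels below repeat the
# same add-then-relu epilogue for the matmul outputs.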
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/uz/cuzbnz5k52xhjpdoaucm3u5qsyj2prasqn3xsty665xnmdng4lmj.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_5 => relu_1
# Graph fragment:
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/i6/ci6majez2aud55eq5xybhkd2mfzqkgh5gnlnqqt7zfbhwxzfzbtu.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_7 => relu_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ar/car7npnehgqa6cartjtkz2r24t3hoefata7ryth2h3ampzdx5u62.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
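# Eager-mode sketch of this epilogue, using the names from the fragment
# above: out = torch.sigmoid(mm_out + bias), fused into the matmul output
# buffer instead of materializing the intermediate add.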
triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 1024, 1), (1024, 1, 1))
assert_size_stride(primals_2, (1024, 1024, 1), (1024, 1, 1))
assert_size_stride(primals_3, (1024, ), (1, ))
assert_size_stride(primals_4, (2048, 1024, 1), (1024, 1, 1))
assert_size_stride(primals_5, (2048, ), (1, ))
assert_size_stride(primals_6, (4096, 2048), (2048, 1))
assert_size_stride(primals_7, (4096, ), (1, ))
assert_size_stride(primals_8, (1024, 4096), (4096, 1))
assert_size_stride(primals_9, (1024, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1024, 1), (1024, 1, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 4096, grid=grid(4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 2048, 1), (2048, 1, 1))
buf3 = reinterpret_tensor(buf2, (4, 2048), (2048, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_5, 8192, grid=grid(8192), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (2048, 4096), (1, 2048), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf5, primals_7, 16384, grid=grid(16384), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (4096, 1024), (1, 4096), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_3.run(buf7, primals_9, 4096, grid=grid(4096), stream=stream0)
del primals_9
return (buf7, primals_1, primals_2, primals_4, buf1, buf3, buf5, buf7, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1024, 1), (1024, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1024, 1024, 1), (1024, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2048, 1024, 1), (1024, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4096, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1024, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
class Anomaly(nn.Module):
def __init__(self, window=1024):
self.window = window
super(Anomaly, self).__init__()
self.layer1 = nn.Conv1d(window, window, kernel_size=1, stride=1,
padding=0)
self.layer2 = nn.Conv1d(window, 2 * window, kernel_size=1, stride=1,
padding=0)
self.fc1 = nn.Linear(2 * window, 4 * window)
self.fc2 = nn.Linear(4 * window, window)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = x.view(x.size(0), self.window, 1)
x = self.layer1(x)
x = self.relu(x)
x = self.layer2(x)
x = x.view(x.size(0), -1)
x = self.relu(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
return torch.sigmoid(x)
def get_inputs():
return [torch.rand([4, 1024, 1])]
def get_init_inputs():
return [[], {}]
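# Usage sketch (assumed, mirroring get_inputs above):
#   m = Anomaly(window=1024)
#   y = m(torch.rand(4, 1024, 1))  # per-window scores in (0, 1), shape (4, 1024)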
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 1024, 1), (1024, 1, 1))
assert_size_stride(primals_2, (1024, 1024, 1), (1024, 1, 1))
assert_size_stride(primals_3, (1024,), (1,))
assert_size_stride(primals_4, (2048, 1024, 1), (1024, 1, 1))
assert_size_stride(primals_5, (2048,), (1,))
assert_size_stride(primals_6, (4096, 2048), (2048, 1))
assert_size_stride(primals_7, (4096,), (1,))
assert_size_stride(primals_8, (1024, 4096), (4096, 1))
assert_size_stride(primals_9, (1024,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1024, 1), (1024, 1, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(4096)](buf1, primals_3,
4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 2048, 1), (2048, 1, 1))
buf3 = reinterpret_tensor(buf2, (4, 2048), (2048, 1), 0)
del buf2
        triton_poi_fused_relu_1[grid(8192)](buf3, primals_5, 8192,
            XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (2048, 4096),
(1, 2048), 0), out=buf4)
buf5 = buf4
del buf4
        triton_poi_fused_relu_2[grid(16384)](buf5, primals_7, 16384,
            XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (4096, 1024),
(1, 4096), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_sigmoid_3[grid(4096)](buf7, primals_9, 4096,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
return (buf7, primals_1, primals_2, primals_4, buf1, buf3, buf5, buf7,
primals_8, primals_6)
class AnomalyNew(nn.Module):
def __init__(self, window=1024):
self.window = window
super(AnomalyNew, self).__init__()
self.layer1 = nn.Conv1d(window, window, kernel_size=1, stride=1,
padding=0)
self.layer2 = nn.Conv1d(window, 2 * window, kernel_size=1, stride=1,
padding=0)
self.fc1 = nn.Linear(2 * window, 4 * window)
self.fc2 = nn.Linear(4 * window, window)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_2 = self.layer1.weight
primals_3 = self.layer1.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| sccc19/anomalydetector | Anomaly | false | 16,440 | [
"MIT"
] | 180 | a963ef8d7f30971e99d21a748d059e26f2163b09 | https://github.com/sccc19/anomalydetector/tree/a963ef8d7f30971e99d21a748d059e26f2163b09 |
Log10Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vj/cvjnwkp2dk2n4r5pk56p2jwi22rcqeox7eojc6ybz5p4z2cppq3d.py
# Topologically Sorted Source Nodes: [log, log_1, sub, truediv, loss], Original ATen: [aten.log, aten.sub, aten.div, aten.abs]
# Source node to ATen node mapping:
# log => log
# log_1 => log_1
# loss => abs_1
# sub => sub
# truediv => div
# Graph fragment:
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log, %log_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 2.302585092994046), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%div,), kwargs = {})
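# Note: the graph divides by ln(10) = 2.302585..., and the kernel below
# implements that division as multiplication by the reciprocal
# 0.43429448190325176 = 1/ln(10), so the stored value is
# |log(target) - log(input)| / ln(10) == |log10(target) - log10(input)|.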
triton_poi_fused_abs_div_log_sub_0 = async_compile.triton('triton_poi_fused_abs_div_log_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_div_log_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_div_log_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.log(tmp2)
tmp4 = tmp1 - tmp3
tmp5 = 0.43429448190325176
tmp6 = tmp4 * tmp5
tmp7 = tl_math.abs(tmp6)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log, log_1, sub, truediv, loss], Original ATen: [aten.log, aten.sub, aten.div, aten.abs]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_div_log_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.nn.init
from math import log
def _mask_input(input, mask=None):
if mask is not None:
input = input * mask
        count = torch.sum(mask).item()
else:
count = np.prod(input.size(), dtype=np.float32).item()
return input, count
class Log10Loss(nn.Module):
def forward(self, input, target, mask=None):
loss = torch.abs((torch.log(target) - torch.log(input)) / log(10))
loss, count = _mask_input(loss, mask)
return torch.sum(loss) / count
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
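# Usage sketch (assumed, mirroring get_inputs above; both tensors must be
# positive so the logs stay finite):
#   crit = Log10Loss()
#   loss = crit(torch.rand(4, 4, 4, 4) + 1e-3, torch.rand(4, 4, 4, 4) + 1e-3)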
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_div_log_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.log(tmp2)
tmp4 = tmp1 - tmp3
tmp5 = 0.43429448190325176
tmp6 = tmp4 * tmp5
tmp7 = tl_math.abs(tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_div_log_sub_0[grid(256)](arg0_1, arg1_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
def _mask_input(input, mask=None):
if mask is not None:
input = input * mask
        count = torch.sum(mask).item()
else:
count = np.prod(input.size(), dtype=np.float32).item()
return input, count
class Log10LossNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| simonmeister/pytorch-mono-depth | Log10Loss | false | 16,441 | [
"MIT"
] | 56 | 713c70e2fdae6d9d6e0322febadfedcaee9470d3 | https://github.com/simonmeister/pytorch-mono-depth/tree/713c70e2fdae6d9d6e0322febadfedcaee9470d3 |
NormalizationLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/5d/c5dhk7bkuqo4u6hoo6g4wm36lekpofplwrblvshjxay6uhmurm2w.py
# Topologically Sorted Source Nodes: [mul, features], Original ATen: [aten.mul, aten.div]
# Source node to ATen node mapping:
# features => div
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %expand), kwargs = {})
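# Eager-mode sketch, using the names from the fragment above (s = primals_1,
# x = primals_2 of shape (4, 4, 4, 4)):
#   out = s * x / x.norm(dim=1, keepdim=True)
# The kernel below unrolls the four-element L2 norm along dim 1 by hand
# (tmp4..tmp15) instead of emitting a reduction.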
triton_poi_fused_div_mul_0 = async_compile.triton('triton_poi_fused_div_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x3), xmask)
tmp4 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp5 = tmp4 * tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp11 + tmp13
tmp15 = libdevice.sqrt(tmp14)
tmp16 = tmp3 / tmp15
tl.store(out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, features], Original ATen: [aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
return (buf0, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.init
class NormalizationLayer(torch.nn.Module):
"""Class for normalization layer."""
def __init__(self, normalize_scale=1.0, learn_scale=True):
super(NormalizationLayer, self).__init__()
self.norm_s = float(normalize_scale)
if learn_scale:
self.norm_s = torch.nn.Parameter(torch.FloatTensor((self.norm_s,)))
def forward(self, x):
        features = self.norm_s * x / torch.norm(
            x, dim=1, keepdim=True).expand_as(x)
return features
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
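# Usage sketch (assumed, mirroring get_inputs above):
#   m = NormalizationLayer(normalize_scale=4.0, learn_scale=True)
#   y = m(torch.rand(4, 4, 4, 4))  # each dim-1 fiber rescaled to norm 4.0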
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x3, xmask)
    tmp4 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp5 = tmp4 * tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp11 + tmp13
tmp15 = libdevice.sqrt(tmp14)
tmp16 = tmp3 / tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_0[grid(256)](primals_1, primals_2, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
return buf0, primals_2
class NormalizationLayerNew(torch.nn.Module):
"""Class for normalization layer."""
def __init__(self, normalize_scale=1.0, learn_scale=True):
super(NormalizationLayerNew, self).__init__()
self.norm_s = float(normalize_scale)
if learn_scale:
self.norm_s = torch.nn.Parameter(torch.FloatTensor((self.norm_s,)))
def forward(self, input_0):
primals_1 = self.norm_s
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| sidphbot/jina-hub | NormalizationLayer | false | 16,442 | [
"Apache-2.0"
] | 106 | ab195030b72353c9b803874e2c99829fb75e1b17 | https://github.com/sidphbot/jina-hub/tree/ab195030b72353c9b803874e2c99829fb75e1b17 |
MaskIOULoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/p3/cp3rlgczdqp27cbq7u456fb24gd36onvg4yriw72uare3co23dqk.py
# Topologically Sorted Source Nodes: [total, max_1, min_1], Original ATen: [aten.stack, aten.max, aten.min]
# Source node to ATen node mapping:
# max_1 => max_1
# min_1 => min_1
# total => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], -1), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%cat, 2), kwargs = {})
# %min_1 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%cat, 2), kwargs = {})
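# Eager-mode sketch of this kernel (names pred/target assumed, following the
# usual PolarMask-style MaskIOU formulation):
#   total = torch.stack([pred, target], dim=-1)
#   l_max = total.max(dim=2)[0]
#   l_min = total.min(dim=2)[0]
# The ratio of these max/min distances feeds the IoU term in the next kernels.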
triton_poi_fused_max_min_stack_0 = async_compile.triton('triton_poi_fused_max_min_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_min_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_min_stack_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2) % 4
x2 = (xindex // 8)
x3 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x1 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.load(in_ptr0 + (4 + x1 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tl.load(in_ptr1 + (4 + x1 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tl.load(in_ptr0 + (8 + x1 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (8 + x1 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.where(tmp4, tmp15, tmp16)
tmp18 = triton_helpers.maximum(tmp14, tmp17)
tmp19 = tl.load(in_ptr0 + (12 + x1 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (12 + x1 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.where(tmp4, tmp19, tmp20)
tmp22 = triton_helpers.maximum(tmp18, tmp21)
tmp23 = triton_helpers.minimum(tmp10, tmp13)
tmp24 = triton_helpers.minimum(tmp23, tmp17)
tmp25 = triton_helpers.minimum(tmp24, tmp21)
tl.store(out_ptr0 + (x3), tmp22, xmask)
tl.store(out_ptr1 + (x3), tmp25, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/t2/ct2pvjspcwv4kcntui47tkazly3f5j5gca72ed537bhssto3li6i.py
# Topologically Sorted Source Nodes: [sum_1, sum_2, truediv, loss], Original ATen: [aten.sum, aten.div, aten.log]
# Source node to ATen node mapping:
# loss => log
# sum_1 => sum_1
# sum_2 => sum_2
# truediv => div
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%getitem, [1]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%getitem_2, [1]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {})
triton_poi_fused_div_log_sum_1 = async_compile.triton('triton_poi_fused_div_log_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_log_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_log_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (32*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (8 + x0 + (32*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x0 + (32*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (24 + x0 + (32*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (x0 + (32*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + (32*x1)), xmask)
tmp10 = tl.load(in_ptr1 + (16 + x0 + (32*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (24 + x0 + (32*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp14 = tmp6 / tmp13
tmp15 = tl_math.log(tmp14)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xb/cxbq43hryomnr3dhkbcdn4sffrvonl3ryppjmvh3cgbtrx5vdk35.py
# Topologically Sorted Source Nodes: [sum_1, sum_2, truediv, loss, loss_1, sum_3], Original ATen: [aten.sum, aten.div, aten.log, aten.mul]
# Source node to ATen node mapping:
# loss => log
# loss_1 => mul
# sum_1 => sum_1
# sum_2 => sum_2
# sum_3 => sum_3
# truediv => div
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%getitem, [1]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%getitem_2, [1]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log, %arg2_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
triton_per_fused_div_log_mul_sum_2 = async_compile.triton('triton_per_fused_div_log_mul_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_log_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_log_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 32
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r2), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 2), (32, 8, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [total, max_1, min_1], Original ATen: [aten.stack, aten.max, aten.min]
stream0 = get_raw_stream(0)
triton_poi_fused_max_min_stack_0.run(arg0_1, arg1_1, buf0, buf1, 128, grid=grid(128), stream=stream0)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, sum_2, truediv, loss], Original ATen: [aten.sum, aten.div, aten.log]
triton_poi_fused_div_log_sum_1.run(buf0, buf1, buf2, 32, grid=grid(32), stream=stream0)
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, sum_2, truediv, loss, loss_1, sum_3], Original ATen: [aten.sum, aten.div, aten.log, aten.mul]
triton_per_fused_div_log_mul_sum_2.run(buf2, arg2_1, buf3, 1, 128, grid=grid(1), stream=stream0)
del arg2_1
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 2), (32, 8, 2, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class MaskIOULoss(nn.Module):
def __init__(self):
super(MaskIOULoss, self).__init__()
def forward(self, pred, target, weight):
total = torch.stack([pred, target], -1)
l_max = total.max(dim=2)[0]
l_min = total.min(dim=2)[0]
loss = (l_max.sum(dim=1) / l_min.sum(dim=1)).log()
loss = loss * weight
return loss.sum()
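# Worked example (hypothetical values): with pred = [2., 1.] and target =
# [1., 2.], the stacked elementwise max is [2., 2.] and min is [1., 1.], so
# the pre-weight loss is log((2 + 2) / (1 + 1)) = log(2) ~= 0.6931.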
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 2])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_min_stack_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2 % 4
x2 = xindex // 8
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp9 = tl.load(in_ptr1 + (x1 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.where(tmp4, tmp15, tmp16)
tmp18 = triton_helpers.maximum(tmp14, tmp17)
tmp19 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tl.where(tmp4, tmp19, tmp20)
tmp22 = triton_helpers.maximum(tmp18, tmp21)
tmp23 = triton_helpers.minimum(tmp10, tmp13)
tmp24 = triton_helpers.minimum(tmp23, tmp17)
tmp25 = triton_helpers.minimum(tmp24, tmp21)
tl.store(out_ptr0 + x3, tmp22, xmask)
tl.store(out_ptr1 + x3, tmp25, xmask)
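# In the kernel above, x0 = xindex % 2 indexes the stacked pred/target pair
# (tl.where stands in for torch.stack), and the four loads at offsets 0, 4,
# 8 and 12 unroll the max/min reduction over dim 2 of the (4, 4, 4, 4) inputs.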
@triton.jit
def triton_poi_fused_div_log_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 32 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (8 + x0 + 32 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x0 + 32 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (24 + x0 + 32 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (x0 + 32 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + 32 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (16 + x0 + 32 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (24 + x0 + 32 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp14 = tmp6 / tmp13
tmp15 = tl_math.log(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_per_fused_div_log_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 32
r2 = rindex
tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + r2, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
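# The persistent reduction above reuses the 32 log-ratios via r0 = rindex %
# 32 against all 128 weight elements, mirroring how the (4, 4, 2) loss
# broadcasts against the (4, 4, 4, 2) weight before the final sum.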
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 2), (32, 8, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_min_stack_0[grid(128)](arg0_1, arg1_1, buf0,
buf1, 128, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_div_log_sum_1[grid(32)](buf0, buf1, buf2, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_div_log_mul_sum_2[grid(1)](buf2, arg2_1, buf3, 1,
128, XBLOCK=1, num_warps=2, num_stages=1)
del arg2_1
del buf2
return buf3,
class MaskIOULossNew(nn.Module):
def __init__(self):
super(MaskIOULossNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
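# Hedged smoke test (an addition, not part of the original dump): it checks
# that the compiled graph collapses to a 0-dim loss on the reference shapes.
# The helper name is made up and a CUDA device is assumed.
def _check_mask_iou_loss_scalar():
    if not torch.cuda.is_available():
        return
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    weight = torch.rand(4, 4, 4, 2, device='cuda')
    out = MaskIOULossNew()(pred, target, weight)
    assert out.dim() == 0  # loss.sum() collapses everything to a scalar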
| shunya-toyokawa/qanet_human_parts_segmentatiom | MaskIOULoss | false | 16,443 | [
"MIT"
] | 72 | 5527b247acd65534b455c26e3692a14b31669602 | https://github.com/shunya-toyokawa/qanet_human_parts_segmentatiom/tree/5527b247acd65534b455c26e3692a14b31669602 |
LocationEncoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/uf/cufnedxa7vzq3ip72kyt5bitnvnjk3f5izl22vednvhtwpnahgqy.py
# Topologically Sorted Source Nodes: [bottom_normalized, bottom_normalized_scaled], Original ATen: [aten.div, aten.mul]
# Source node to ATen node mapping:
# bottom_normalized => div
# bottom_normalized_scaled => mul
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {})
triton_poi_fused_div_mul_0 = async_compile.triton('triton_poi_fused_div_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = (xindex // 80)
x5 = xindex % 20
x0 = xindex % 5
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x5 + (80*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (20 + x5 + (80*x3)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (40 + x5 + (80*x3)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (60 + x5 + (80*x3)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + (x4), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 5), (80, 20, 5, 1))
assert_size_stride(primals_2, (1, 5), (5, 1))
assert_size_stride(primals_3, (4, 5), (5, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [bottom_normalized, bottom_normalized_scaled], Original ATen: [aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_div_mul_0.run(primals_1, primals_2, buf0, 320, grid=grid(320), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 5), (5, 1), 0), reinterpret_tensor(primals_3, (5, 4), (1, 5), 0), alpha=1, beta=1, out=buf1)
del primals_4
return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(buf0, (64, 5), (5, 1), 0), primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 5), (80, 20, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 5), (5, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 5), (5, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class NormalizeScale(nn.Module):
def __init__(self, dim, init_norm=20):
super(NormalizeScale, self).__init__()
self.init_norm = init_norm
self.weight = nn.Parameter(torch.ones(1, dim) * init_norm)
def forward(self, bottom):
bottom_normalized = F.normalize(bottom, p=2, dim=1)
bottom_normalized_scaled = bottom_normalized * self.weight
return bottom_normalized_scaled
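# F.normalize(bottom, p=2, dim=1) divides by max(||bottom||_2, 1e-12) along
# dim 1; the fused kernel reproduces exactly this sqrt / clamp / divide
# sequence before multiplying by the learned (1, dim) scale.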
class LocationEncoder(nn.Module):
def __init__(self, init_norm, dim):
super(LocationEncoder, self).__init__()
self.lfeat_normalizer = NormalizeScale(5, init_norm)
self.fc = nn.Linear(5, dim)
def forward(self, lfeats):
loc_feat = self.lfeat_normalizer(lfeats)
output = self.fc(loc_feat)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 5])]
def get_init_inputs():
return [[], {'init_norm': 4, 'dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = xindex // 80
x5 = xindex % 20
x0 = xindex % 5
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (x5 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (20 + x5 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (40 + x5 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (60 + x5 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x4, tmp17, xmask)
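# In the kernel above, the four loads at offsets 0, 20, 40 and 60 gather the
# dim-1 entries whose squared sum gives the L2 norm, and x0 = xindex % 5
# selects the matching column of the (1, 5) scale weight.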
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 5), (80, 20, 5, 1))
assert_size_stride(primals_2, (1, 5), (5, 1))
assert_size_stride(primals_3, (4, 5), (5, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_0[grid(320)](primals_1, primals_2, buf0,
320, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 5), (
5, 1), 0), reinterpret_tensor(primals_3, (5, 4), (1, 5), 0),
alpha=1, beta=1, out=buf1)
del primals_4
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, reinterpret_tensor(buf0, (64, 5), (5, 1), 0), primals_3
class NormalizeScale(nn.Module):
def __init__(self, dim, init_norm=20):
super(NormalizeScale, self).__init__()
self.init_norm = init_norm
self.weight = nn.Parameter(torch.ones(1, dim) * init_norm)
def forward(self, bottom):
bottom_normalized = F.normalize(bottom, p=2, dim=1)
bottom_normalized_scaled = bottom_normalized * self.weight
return bottom_normalized_scaled
class LocationEncoderNew(nn.Module):
def __init__(self, init_norm, dim):
super(LocationEncoderNew, self).__init__()
self.lfeat_normalizer = NormalizeScale(5, init_norm)
self.fc = nn.Linear(5, dim)
def forward(self, input_0):
primals_2 = self.lfeat_normalizer.weight
primals_3 = self.fc.weight
primals_4 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
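# Hedged smoke test (an addition, not part of the original dump): the
# compiled path should map a (4, 4, 4, 5) location tensor to (4, 4, 4, 4).
# The helper name is made up and a CUDA device is assumed.
def _check_location_encoder_shape():
    if not torch.cuda.is_available():
        return
    enc = LocationEncoderNew(init_norm=4, dim=4).cuda()
    out = enc(torch.rand(4, 4, 4, 5, device='cuda'))
    assert out.shape == (4, 4, 4, 4)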
| sibeiyang/sgmn | LocationEncoder | false | 16,444 | [
"MIT"
] | 130 | 00731b4f2202246d40a36d2a6727c599e6e649aa | https://github.com/sibeiyang/sgmn/tree/00731b4f2202246d40a36d2a6727c599e6e649aa |
ConvUnit | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/f4/cf4ifsqcqwdpr272fpp3kls2r7dv45llmezneacs7dxjmqv5pu6h.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 460800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 3600) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (32, 256, 5, 5), (6400, 25, 5, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 60, 60), (115200, 3600, 60, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 460800, grid=grid(460800), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 256, 5, 5), (6400, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ConvUnit(nn.Module):
def __init__(self):
super(ConvUnit, self).__init__()
        self.conv = nn.Conv2d(in_channels=256, out_channels=32,
            kernel_size=5, stride=1)
def forward(self, x):
return self.conv(x)
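# Inductor lowers this module to an external cuDNN convolution called with
# bias=None plus a small Triton kernel that adds the per-channel bias in
# place; there, xindex // 3600 % 32 recovers the channel from the flat
# offset (3600 = 60 * 60 output pixels per channel).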
def get_inputs():
return [torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3600 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (32, 256, 5, 5), (6400, 25, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 60, 60), (115200, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(460800)](buf1, primals_2,
460800, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class ConvUnitNew(nn.Module):
def __init__(self):
super(ConvUnitNew, self).__init__()
        self.conv = nn.Conv2d(in_channels=256, out_channels=32,
            kernel_size=5, stride=1)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| shinoyuki222/torch-light | ConvUnit | false | 16,445 | [
"MIT"
] | 310 | 4799805d9bcae82a9f12a574dcf9fdd838c92ee9 | https://github.com/shinoyuki222/torch-light/tree/4799805d9bcae82a9f12a574dcf9fdd838c92ee9 |
RelLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xv/cxvarvo2hn56e6z3rj3qi7p6nbrxiuvbj6qw3qyksxbfpdw27rvv.py
# Topologically Sorted Source Nodes: [sub, abs_1, loss], Original ATen: [aten.sub, aten.abs, aten.div]
# Source node to ATen node mapping:
# abs_1 => abs_1
# loss => div
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%abs_1, %arg1_1), kwargs = {})
triton_poi_fused_abs_div_sub_0 = async_compile.triton('triton_poi_fused_abs_div_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_div_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp3 / tmp1
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, abs_1, loss], Original ATen: [aten.sub, aten.abs, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_div_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.nn.init
def _mask_input(input, mask=None):
if mask is not None:
input = input * mask
        count = torch.sum(mask).item()  # .data[0] indexing a 0-dim tensor fails on modern PyTorch
else:
count = np.prod(input.size(), dtype=np.float32).item()
return input, count
class RelLoss(nn.Module):
def forward(self, input, target, mask=None):
loss = torch.abs(input - target) / target
loss, count = _mask_input(loss, mask)
return torch.sum(loss) / count
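# Worked example (hypothetical values): input = 1.2 and target = 1.0 give
# |1.2 - 1.0| / 1.0 = 0.2, a 20% relative error for that element; the
# division is unguarded, so every target entry is assumed to be nonzero.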
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_div_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp3 / tmp1
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_div_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
def _mask_input(input, mask=None):
if mask is not None:
input = input * mask
        count = torch.sum(mask).item()  # .data[0] indexing a 0-dim tensor fails on modern PyTorch
else:
count = np.prod(input.size(), dtype=np.float32).item()
return input, count
class RelLossNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
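# Hedged smoke test (an addition, not part of the original dump): the fused
# graph covers only the elementwise |input - target| / target step, so it
# should match the eager expression. The helper name is made up and a CUDA
# device is assumed.
def _check_rel_loss_elementwise():
    if not torch.cuda.is_available():
        return
    a = torch.rand(4, 4, 4, 4, device='cuda') + 0.5
    b = torch.rand(4, 4, 4, 4, device='cuda') + 0.5  # keep targets nonzero
    out = RelLossNew()(a, b)
    assert out.shape == (4, 4, 4, 4)
    assert torch.allclose(out, (a - b).abs() / b)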
| simonmeister/pytorch-mono-depth | RelLoss | false | 16,446 | [
"MIT"
] | 56 | 713c70e2fdae6d9d6e0322febadfedcaee9470d3 | https://github.com/simonmeister/pytorch-mono-depth/tree/713c70e2fdae6d9d6e0322febadfedcaee9470d3 |
MergeModule | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/jr/cjrodalf3ceeqlfy7acvbucqx3b3jl7kw6yxinxjxecfhzetqyyh.py
# Topologically Sorted Source Nodes: [mul, sub_attn_map_sum, mul_1, obj_attn_map_sum, add], Original ATen: [aten.mul, aten.sum, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# mul_1 => mul_1
# obj_attn_map_sum => sum_2
# sub_attn_map_sum => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand_1, %arg3_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_poi_fused_add_mul_sum_0 = async_compile.triton('triton_poi_fused_add_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x3 + (64*x2)), xmask)
tmp6 = tl.load(in_ptr0 + (4 + x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (16 + x3 + (64*x2)), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x1), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (32 + x3 + (64*x2)), xmask)
tmp18 = tl.load(in_ptr0 + (12 + x1), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (48 + x3 + (64*x2)), xmask)
tmp24 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + (x3 + (64*x2)), xmask)
tmp29 = tl.load(in_ptr2 + (4 + x1), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr3 + (16 + x3 + (64*x2)), xmask)
tmp35 = tl.load(in_ptr2 + (8 + x1), xmask, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr3 + (32 + x3 + (64*x2)), xmask)
tmp41 = tl.load(in_ptr2 + (12 + x1), xmask, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr3 + (48 + x3 + (64*x2)), xmask)
tmp1 = 1.0
tmp2 = tmp0 == tmp1
tmp3 = tmp2.to(tl.float32)
tmp5 = tmp3 * tmp4
tmp7 = tmp6 == tmp1
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = tmp5 + tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13.to(tl.float32)
tmp16 = tmp14 * tmp15
tmp17 = tmp11 + tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19.to(tl.float32)
tmp22 = tmp20 * tmp21
tmp23 = tmp17 + tmp22
tmp25 = tmp24 == tmp1
tmp26 = tmp25.to(tl.float32)
tmp28 = tmp26 * tmp27
tmp30 = tmp29 == tmp1
tmp31 = tmp30.to(tl.float32)
tmp33 = tmp31 * tmp32
tmp34 = tmp28 + tmp33
tmp36 = tmp35 == tmp1
tmp37 = tmp36.to(tl.float32)
tmp39 = tmp37 * tmp38
tmp40 = tmp34 + tmp39
tmp42 = tmp41 == tmp1
tmp43 = tmp42.to(tl.float32)
tmp45 = tmp43 * tmp44
tmp46 = tmp40 + tmp45
tmp47 = tmp23 + tmp46
tl.store(out_ptr0 + (x4), tmp47, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bk/cbkr7e4apb3rdmx2gwwydk7wlxvxklzm3uup3akkpcjtl4xcd6zn.py
# Topologically Sorted Source Nodes: [attn_map_sum, max_1, setitem], Original ATen: [aten.add, aten.max, aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# attn_map_sum => add_1
# max_1 => max_1
# setitem => full_default, index_put
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %arg4_1), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%add_1, 1, True), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%getitem, [%le], %full_default), kwargs = {})
triton_poi_fused_add_index_put_lift_fresh_max_1 = async_compile.triton('triton_poi_fused_add_index_put_lift_fresh_max_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_index_put_lift_fresh_max_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_index_put_lift_fresh_max_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = 1.0
tmp16 = tmp14 <= tmp15
tmp17 = tl.where(tmp16, tmp15, tmp14)
tl.store(in_out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/t6/ct6ws545hprtb33ro667dno4qgvlbd545cowvrlc3dvlmwidn5l7.py
# Topologically Sorted Source Nodes: [attn_map_sum, attn], Original ATen: [aten.add, aten.div]
# Source node to ATen node mapping:
# attn => div
# attn_map_sum => add_1
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %arg4_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %index_put), kwargs = {})
triton_poi_fused_add_div_2 = async_compile.triton('triton_poi_fused_add_div_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x4 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask)
tmp3 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sub_attn_map_sum, mul_1, obj_attn_map_sum, add], Original ATen: [aten.mul, aten.sum, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sum_0.run(arg1_1, arg0_1, arg2_1, arg3_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [attn_map_sum, max_1, setitem], Original ATen: [aten.add, aten.max, aten.lift_fresh, aten.index_put]
triton_poi_fused_add_index_put_lift_fresh_max_1.run(buf2, buf0, arg4_1, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_map_sum, attn], Original ATen: [aten.add, aten.div]
triton_poi_fused_add_div_2.run(buf0, arg4_1, buf2, buf3, 256, grid=grid(256), stream=stream0)
del arg4_1
del buf0
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class NormAttnMap(nn.Module):
def __init__(self, norm_type='cossim'):
super(NormAttnMap, self).__init__()
self.norm_type = norm_type
def forward(self, attn_map):
if self.norm_type != 'cosssim':
norm = torch.max(attn_map, dim=1, keepdim=True)[0].detach()
else:
            norm = torch.max(torch.abs(attn_map), dim=1, keepdim=True)[0].detach()
norm[norm <= 1] = 1
attn = attn_map / norm
return attn, norm
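# Note: the literal above is 'cosssim' (triple s), so the default
# norm_type='cossim' always takes the plain-max branch, which is also what
# the fused Triton kernel implements; norm[norm <= 1] = 1 then clamps the
# divisor so attention values are never scaled up.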
class MergeModule(nn.Module):
def __init__(self, norm_type='cossim', need_norm=True):
super(MergeModule, self).__init__()
self.norm_type = norm_type
self.need_norm = need_norm
self.norm_fun = NormAttnMap(norm_type)
def forward(self, attn_map, global_sub_attn_maps, global_obj_attn_maps,
mask_sub, mask_obj):
        bs, num_seq, n = (global_sub_attn_maps.size(0),
            global_sub_attn_maps.size(1), global_sub_attn_maps.size(2))
mask_sub_expand = (mask_sub == 1).float().unsqueeze(2).expand(bs,
num_seq, n)
sub_attn_map_sum = torch.sum(mask_sub_expand * global_sub_attn_maps,
dim=1)
mask_obj_expand = (mask_obj == 1).float().unsqueeze(2).expand(bs,
num_seq, n)
obj_attn_map_sum = torch.sum(mask_obj_expand * global_obj_attn_maps,
dim=1)
attn_map_sum = sub_attn_map_sum + obj_attn_map_sum + attn_map
if self.need_norm:
attn, _norm = self.norm_fun(attn_map_sum)
else:
attn = attn_map_sum
return attn
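# (mask == 1).float() turns each (bs, num_seq) mask into 0/1 gates that are
# broadcast over the attention maps, so the dim=1 sums keep only the
# sequence positions the masks select before attn_map is added back in.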
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x3 + 64 * x2), xmask)
tmp6 = tl.load(in_ptr0 + (4 + x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (16 + x3 + 64 * x2), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x1), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (32 + x3 + 64 * x2), xmask)
tmp18 = tl.load(in_ptr0 + (12 + x1), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (48 + x3 + 64 * x2), xmask)
tmp24 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + (x3 + 64 * x2), xmask)
tmp29 = tl.load(in_ptr2 + (4 + x1), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr3 + (16 + x3 + 64 * x2), xmask)
tmp35 = tl.load(in_ptr2 + (8 + x1), xmask, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr3 + (32 + x3 + 64 * x2), xmask)
tmp41 = tl.load(in_ptr2 + (12 + x1), xmask, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr3 + (48 + x3 + 64 * x2), xmask)
tmp1 = 1.0
tmp2 = tmp0 == tmp1
tmp3 = tmp2.to(tl.float32)
tmp5 = tmp3 * tmp4
tmp7 = tmp6 == tmp1
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = tmp5 + tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13.to(tl.float32)
tmp16 = tmp14 * tmp15
tmp17 = tmp11 + tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19.to(tl.float32)
tmp22 = tmp20 * tmp21
tmp23 = tmp17 + tmp22
tmp25 = tmp24 == tmp1
tmp26 = tmp25.to(tl.float32)
tmp28 = tmp26 * tmp27
tmp30 = tmp29 == tmp1
tmp31 = tmp30.to(tl.float32)
tmp33 = tmp31 * tmp32
tmp34 = tmp28 + tmp33
tmp36 = tmp35 == tmp1
tmp37 = tmp36.to(tl.float32)
tmp39 = tmp37 * tmp38
tmp40 = tmp34 + tmp39
tmp42 = tmp41 == tmp1
tmp43 = tmp42.to(tl.float32)
tmp45 = tmp43 * tmp44
tmp46 = tmp40 + tmp45
tmp47 = tmp23 + tmp46
tl.store(out_ptr0 + x4, tmp47, xmask)
@triton.jit
def triton_poi_fused_add_index_put_lift_fresh_max_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = 1.0
tmp16 = tmp14 <= tmp15
tmp17 = tl.where(tmp16, tmp15, tmp14)
tl.store(in_out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused_add_div_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x4 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask)
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sum_0[grid(64)](arg1_1, arg0_1, arg2_1,
arg3_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf2 = buf1
del buf1
triton_poi_fused_add_index_put_lift_fresh_max_1[grid(64)](buf2,
buf0, arg4_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_2[grid(256)](buf0, arg4_1, buf2, buf3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg4_1
del buf0
del buf2
return buf3,
class NormAttnMap(nn.Module):
def __init__(self, norm_type='cossim'):
super(NormAttnMap, self).__init__()
self.norm_type = norm_type
def forward(self, attn_map):
        # NB: 'cosssim' (three s's) never equals the default 'cossim', so this
        # plain-max branch runs by default; the fused max kernel above matches it.
        if self.norm_type != 'cosssim':
norm = torch.max(attn_map, dim=1, keepdim=True)[0].detach()
else:
norm = torch.max(torch.abs(attn_map), dim=1, keepdim=True)[0
].detach()
norm[norm <= 1] = 1
attn = attn_map / norm
return attn, norm
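# Hedged usage sketch (editor-added, not from the source repo): NormAttnMap
# clamps the per-position max to at least 1 before dividing, so attention
# values are only ever scaled down, never up.
#   attn_map = torch.rand(2, 4, 8, 8)      # assumed [batch, maps, H, W] layout
#   attn, norm = NormAttnMap()(attn_map)
#   assert norm.min() >= 1 and attn.shape == attn_map.shape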
class MergeModuleNew(nn.Module):
def __init__(self, norm_type='cossim', need_norm=True):
super(MergeModuleNew, self).__init__()
self.norm_type = norm_type
self.need_norm = need_norm
self.norm_fun = NormAttnMap(norm_type)
def forward(self, input_0, input_1, input_2, input_3, input_4):
arg0_1 = input_0
arg3_1 = input_1
arg4_1 = input_2
arg1_1 = input_3
arg2_1 = input_4
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return output[0]
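# Hedged note (editor-added): `call` above fuses a mask-weighted sum over the
# two attention stacks (kernel 0) with NormAttnMap's clamp-and-divide
# (kernels 1 and 2), so forward returns only the normalized map; the norm
# tensor computed by NormAttnMap is not exposed here.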
| sibeiyang/sgmn | MergeModule | false | 16,447 | ["MIT"] | 130 | 00731b4f2202246d40a36d2a6727c599e6e649aa | https://github.com/sibeiyang/sgmn/tree/00731b4f2202246d40a36d2a6727c599e6e649aa |
GSympNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yc/cyccry4agz6fho3j2txqcq7s6w3cbxqp25s3bjf2yb3dpe6q5sgt.py
# Topologically Sorted Source Nodes: [add, sigmoid, mul], Original ATen: [aten.add, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# add => add
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_4), kwargs = {})
triton_poi_fused_add_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ts/ctshmjxiubxutvmsloys6a5fvrpex5qymvu4frsjsx7ayrcuit7l.py
# Topologically Sorted Source Nodes: [p_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# p_1 => add_1
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_1, %view_3), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/am/camcqc6ipaoh7nku3qgz73pdjidzdm2fc272fuxfzghsxiz4hdj7.py
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# q_1 => add_3
# Graph fragment:
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_7, %slice_2), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
x1 = (xindex // 2)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + (4*x1)), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/3k/c3ko5okkcgntk4ai7xt7lmo3cbvxnljnnkgvfs2jo2kprky5lwqc.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_5, %add_3], -1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((2*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + ((2*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + ((2*x1) + ((-2) + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 20), (20, 1))
assert_size_stride(primals_3, (20, ), (1, ))
assert_size_stride(primals_4, (20, ), (1, ))
assert_size_stride(primals_5, (2, 20), (20, 1))
assert_size_stride(primals_6, (20, ), (1, ))
assert_size_stride(primals_7, (20, ), (1, ))
assert_size_stride(primals_8, (2, 20), (20, 1))
assert_size_stride(primals_9, (20, ), (1, ))
assert_size_stride(primals_10, (20, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 2), (4, 1), 2), primals_2, out=buf0)
buf1 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, sigmoid, mul], Original ATen: [aten.add, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_0.run(buf0, primals_3, primals_4, buf1, 1280, grid=grid(1280), stream=stream0)
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [gradH], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf1, (64, 20), (20, 1), 0), reinterpret_tensor(primals_2, (20, 2), (1, 20), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [p_1], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf3, primals_1, 128, grid=grid(128), stream=stream0)
buf4 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (64, 2), (2, 1), 0), primals_5, out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_2, sigmoid_1, mul_2], Original ATen: [aten.add, aten.sigmoid, aten.mul]
triton_poi_fused_add_mul_sigmoid_0.run(buf4, primals_6, primals_7, buf5, 1280, grid=grid(1280), stream=stream0)
buf6 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [gradH_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf5, (64, 20), (20, 1), 0), reinterpret_tensor(primals_5, (20, 2), (1, 20), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf7, primals_1, 128, grid=grid(128), stream=stream0)
buf8 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf7, (64, 2), (2, 1), 0), primals_8, out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_4, sigmoid_2, mul_4], Original ATen: [aten.add, aten.sigmoid, aten.mul]
triton_poi_fused_add_mul_sigmoid_0.run(buf8, primals_9, primals_10, buf9, 1280, grid=grid(1280), stream=stream0)
buf10 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [gradH_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf9, (64, 20), (20, 1), 0), reinterpret_tensor(primals_8, (20, 2), (1, 20), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf3, buf10, buf7, buf11, 256, grid=grid(256), stream=stream0)
del buf10
return (buf11, primals_3, primals_4, primals_6, primals_7, primals_9, primals_10, buf0, reinterpret_tensor(buf1, (64, 20), (20, 1), 0), buf4, reinterpret_tensor(primals_5, (20, 2), (1, 20), 0), reinterpret_tensor(buf5, (64, 20), (20, 1), 0), buf8, reinterpret_tensor(primals_8, (20, 2), (1, 20), 0), reinterpret_tensor(buf9, (64, 20), (20, 1), 0), reinterpret_tensor(buf7, (2, 64), (1, 2), 0), reinterpret_tensor(buf3, (2, 64), (1, 2), 0), primals_2, reinterpret_tensor(primals_1, (2, 64), (1, 4), 2), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 20), (20, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, 20), (20, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((2, 20), (20, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import torch.nn as nn
class Module(torch.nn.Module):
"""Standard module format.
"""
def __init__(self):
super(Module, self).__init__()
self.activation = None
self.initializer = None
self.__device = None
self.__dtype = None
@property
def device(self):
return self.__device
@property
def dtype(self):
return self.__dtype
@device.setter
def device(self, d):
if d == 'cpu':
self.cpu()
elif d == 'gpu':
self
else:
raise ValueError
self.__device = d
@dtype.setter
def dtype(self, d):
if d == 'float':
self
elif d == 'double':
self
else:
raise ValueError
self.__dtype = d
@property
def Device(self):
if self.__device == 'cpu':
return torch.device('cpu')
elif self.__device == 'gpu':
return torch.device('cuda')
@property
def Dtype(self):
if self.__dtype == 'float':
return torch.float32
elif self.__dtype == 'double':
return torch.float64
@property
def act(self):
if self.activation == 'sigmoid':
return torch.sigmoid
elif self.activation == 'relu':
return torch.relu
elif self.activation == 'tanh':
return torch.tanh
elif self.activation == 'elu':
            return torch.nn.functional.elu  # torch exposes elu only under nn.functional
else:
raise NotImplementedError
@property
def Act(self):
if self.activation == 'sigmoid':
return torch.nn.Sigmoid()
elif self.activation == 'relu':
return torch.nn.ReLU()
elif self.activation == 'tanh':
return torch.nn.Tanh()
elif self.activation == 'elu':
return torch.nn.ELU()
else:
raise NotImplementedError
@property
def weight_init_(self):
if self.initializer == 'He normal':
return torch.nn.init.kaiming_normal_
elif self.initializer == 'He uniform':
return torch.nn.init.kaiming_uniform_
elif self.initializer == 'Glorot normal':
return torch.nn.init.xavier_normal_
elif self.initializer == 'Glorot uniform':
return torch.nn.init.xavier_uniform_
elif self.initializer == 'orthogonal':
return torch.nn.init.orthogonal_
elif self.initializer == 'default':
if self.activation == 'relu':
return torch.nn.init.kaiming_normal_
elif self.activation == 'tanh':
return torch.nn.init.orthogonal_
else:
return lambda x: None
else:
raise NotImplementedError
class StructureNN(Module):
"""Structure-oriented neural network used as a general map based on designing architecture.
"""
def __init__(self):
super(StructureNN, self).__init__()
def predict(self, x, returnnp=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=self.Dtype, device=self.Device)
return self(x).cpu().detach().numpy() if returnnp else self(x)
class GradientModule(Module):
"""Gradient symplectic module.
"""
def __init__(self, dim, width, activation, mode):
super(GradientModule, self).__init__()
self.dim = dim
self.width = width
self.activation = activation
self.mode = mode
self.params = self.__init_params()
def forward(self, pqh):
p, q, h = pqh
if self.mode == 'up':
gradH = self.act(q @ self.params['K'] + self.params['b']
) * self.params['a'] @ self.params['K'].t()
return p + gradH * h, q
elif self.mode == 'low':
gradH = self.act(p @ self.params['K'] + self.params['b']
) * self.params['a'] @ self.params['K'].t()
return p, gradH * h + q
else:
raise ValueError
def __init_params(self):
d = int(self.dim / 2)
params = nn.ParameterDict()
params['K'] = nn.Parameter((torch.randn([d, self.width]) * 0.01).
requires_grad_(True))
params['a'] = nn.Parameter((torch.randn([self.width]) * 0.01).
requires_grad_(True))
params['b'] = nn.Parameter(torch.zeros([self.width]).requires_grad_
(True))
return params
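# Hedged note (editor-added): each GradientModule is a symplectic shear. In
# 'up' mode it applies
#   p <- p + h * (sigma(q K + b) * a) K^T,   q <- q
# and 'low' mode applies the mirrored update to q; composing such shears keeps
# the overall map symplectic, which is what SympNet relies on.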
class SympNet(StructureNN):
def __init__(self):
super(SympNet, self).__init__()
self.dim = None
def predict(self, xh, steps=1, keepinitx=False, returnnp=False):
dim = xh.size(-1)
size = len(xh.size())
if dim == self.dim:
pred = [xh]
for _ in range(steps):
pred.append(self(pred[-1]))
else:
x0, h = xh[..., :-1], xh[..., -1:]
pred = [x0]
for _ in range(steps):
pred.append(self(torch.cat([pred[-1], h], dim=-1)))
if keepinitx:
steps = steps + 1
else:
pred = pred[1:]
res = torch.cat(pred, dim=-1).view([-1, steps, self.dim][2 - size:])
return res.cpu().detach().numpy() if returnnp else res
class GSympNet(SympNet):
"""G-SympNet.
Input: [num, dim] or [num, dim + 1]
Output: [num, dim]
"""
def __init__(self, dim, layers=3, width=20, activation='sigmoid'):
super(GSympNet, self).__init__()
self.dim = dim
self.layers = layers
self.width = width
self.activation = activation
self.modus = self.__init_modules()
def forward(self, pqh):
d = int(self.dim / 2)
if pqh.size(-1) == self.dim + 1:
p, q, h = pqh[..., :d], pqh[..., d:-1], pqh[..., -1:]
elif pqh.size(-1) == self.dim:
p, q, h = pqh[..., :d], pqh[..., d:], torch.ones_like(pqh[..., -1:]
)
else:
raise ValueError
for i in range(self.layers):
GradM = self.modus['GradM{}'.format(i + 1)]
p, q = GradM([p, q, h])
return torch.cat([p, q], dim=-1)
def __init_modules(self):
modules = nn.ModuleDict()
for i in range(self.layers):
mode = 'up' if i % 2 == 0 else 'low'
modules['GradM{}'.format(i + 1)] = GradientModule(self.dim,
self.width, self.activation, mode)
return modules
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
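# Hedged smoke test (editor-added sketch mirroring get_inputs/get_init_inputs):
if __name__ == '__main__':
    net = GSympNet(dim=4)                # three alternating 'up'/'low' layers
    y = net(torch.rand(4, 4, 4, 4))      # last dim == dim, so h defaults to 1
    assert y.shape == (4, 4, 4, 4)
    traj = net.predict(torch.rand(8, 4), steps=3)  # rollout -> shape (8, 3, 4)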
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
x1 = xindex // 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (2 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp13 = tl.load(in_ptr2 + (2 * x1 + (-2 + x0)), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 20), (20, 1))
assert_size_stride(primals_3, (20,), (1,))
assert_size_stride(primals_4, (20,), (1,))
assert_size_stride(primals_5, (2, 20), (20, 1))
assert_size_stride(primals_6, (20,), (1,))
assert_size_stride(primals_7, (20,), (1,))
assert_size_stride(primals_8, (2, 20), (20, 1))
assert_size_stride(primals_9, (20,), (1,))
assert_size_stride(primals_10, (20,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 2), (4, 1), 2),
primals_2, out=buf0)
buf1 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_0[grid(1280)](buf0, primals_3,
primals_4, buf1, 1280, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 20), (20, 1), 0),
reinterpret_tensor(primals_2, (20, 2), (1, 20), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf2
triton_poi_fused_add_1[grid(128)](buf3, primals_1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 2), (2, 1), 0),
primals_5, out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.
float32)
triton_poi_fused_add_mul_sigmoid_0[grid(1280)](buf4, primals_6,
primals_7, buf5, 1280, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 20), (20, 1), 0),
reinterpret_tensor(primals_5, (20, 2), (1, 20), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf6
triton_poi_fused_add_2[grid(128)](buf7, primals_1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (64, 2), (2, 1), 0),
primals_8, out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.
float32)
triton_poi_fused_add_mul_sigmoid_0[grid(1280)](buf8, primals_9,
primals_10, buf9, 1280, XBLOCK=128, num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (64, 20), (20, 1), 0),
reinterpret_tensor(primals_8, (20, 2), (1, 20), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_cat_3[grid(256)](buf3, buf10, buf7, buf11, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf10
return (buf11, primals_3, primals_4, primals_6, primals_7, primals_9,
primals_10, buf0, reinterpret_tensor(buf1, (64, 20), (20, 1), 0),
buf4, reinterpret_tensor(primals_5, (20, 2), (1, 20), 0),
reinterpret_tensor(buf5, (64, 20), (20, 1), 0), buf8,
reinterpret_tensor(primals_8, (20, 2), (1, 20), 0),
reinterpret_tensor(buf9, (64, 20), (20, 1), 0), reinterpret_tensor(
buf7, (2, 64), (1, 2), 0), reinterpret_tensor(buf3, (2, 64), (1, 2),
0), primals_2, reinterpret_tensor(primals_1, (2, 64), (1, 4), 2))
class Module(torch.nn.Module):
"""Standard module format.
"""
def __init__(self):
super(Module, self).__init__()
self.activation = None
self.initializer = None
self.__device = None
self.__dtype = None
@property
def device(self):
return self.__device
@property
def dtype(self):
return self.__dtype
@device.setter
def device(self, d):
if d == 'cpu':
self.cpu()
elif d == 'gpu':
self
else:
raise ValueError
self.__device = d
@dtype.setter
def dtype(self, d):
if d == 'float':
self
elif d == 'double':
self
else:
raise ValueError
self.__dtype = d
@property
def Device(self):
if self.__device == 'cpu':
return torch.device('cpu')
elif self.__device == 'gpu':
return torch.device('cuda')
@property
def Dtype(self):
if self.__dtype == 'float':
return torch.float32
elif self.__dtype == 'double':
return torch.float64
@property
def act(self):
if self.activation == 'sigmoid':
return torch.sigmoid
elif self.activation == 'relu':
return torch.relu
elif self.activation == 'tanh':
return torch.tanh
elif self.activation == 'elu':
            return torch.nn.functional.elu  # torch exposes elu only under nn.functional
else:
raise NotImplementedError
@property
def Act(self):
if self.activation == 'sigmoid':
return torch.nn.Sigmoid()
elif self.activation == 'relu':
return torch.nn.ReLU()
elif self.activation == 'tanh':
return torch.nn.Tanh()
elif self.activation == 'elu':
return torch.nn.ELU()
else:
raise NotImplementedError
@property
def weight_init_(self):
if self.initializer == 'He normal':
return torch.nn.init.kaiming_normal_
elif self.initializer == 'He uniform':
return torch.nn.init.kaiming_uniform_
elif self.initializer == 'Glorot normal':
return torch.nn.init.xavier_normal_
elif self.initializer == 'Glorot uniform':
return torch.nn.init.xavier_uniform_
elif self.initializer == 'orthogonal':
return torch.nn.init.orthogonal_
elif self.initializer == 'default':
if self.activation == 'relu':
return torch.nn.init.kaiming_normal_
elif self.activation == 'tanh':
return torch.nn.init.orthogonal_
else:
return lambda x: None
else:
raise NotImplementedError
class StructureNN(Module):
"""Structure-oriented neural network used as a general map based on designing architecture.
"""
def __init__(self):
super(StructureNN, self).__init__()
def predict(self, x, returnnp=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=self.Dtype, device=self.Device)
return self(x).cpu().detach().numpy() if returnnp else self(x)
class GradientModule(Module):
"""Gradient symplectic module.
"""
def __init__(self, dim, width, activation, mode):
super(GradientModule, self).__init__()
self.dim = dim
self.width = width
self.activation = activation
self.mode = mode
self.params = self.__init_params()
def forward(self, pqh):
p, q, h = pqh
if self.mode == 'up':
gradH = self.act(q @ self.params['K'] + self.params['b']
) * self.params['a'] @ self.params['K'].t()
return p + gradH * h, q
elif self.mode == 'low':
gradH = self.act(p @ self.params['K'] + self.params['b']
) * self.params['a'] @ self.params['K'].t()
return p, gradH * h + q
else:
raise ValueError
def __init_params(self):
d = int(self.dim / 2)
params = nn.ParameterDict()
params['K'] = nn.Parameter((torch.randn([d, self.width]) * 0.01).
requires_grad_(True))
params['a'] = nn.Parameter((torch.randn([self.width]) * 0.01).
requires_grad_(True))
params['b'] = nn.Parameter(torch.zeros([self.width]).requires_grad_
(True))
return params
class SympNet(StructureNN):
def __init__(self):
super(SympNet, self).__init__()
self.dim = None
def predict(self, xh, steps=1, keepinitx=False, returnnp=False):
dim = xh.size(-1)
size = len(xh.size())
if dim == self.dim:
pred = [xh]
for _ in range(steps):
pred.append(self(pred[-1]))
else:
x0, h = xh[..., :-1], xh[..., -1:]
pred = [x0]
for _ in range(steps):
pred.append(self(torch.cat([pred[-1], h], dim=-1)))
if keepinitx:
steps = steps + 1
else:
pred = pred[1:]
res = torch.cat(pred, dim=-1).view([-1, steps, self.dim][2 - size:])
return res.cpu().detach().numpy() if returnnp else res
class GSympNetNew(SympNet):
"""G-SympNet.
Input: [num, dim] or [num, dim + 1]
Output: [num, dim]
"""
def __init__(self, dim, layers=3, width=20, activation='sigmoid'):
super(GSympNetNew, self).__init__()
self.dim = dim
self.layers = layers
self.width = width
self.activation = activation
self.modus = self.__init_modules()
def __init_modules(self):
modules = nn.ModuleDict()
for i in range(self.layers):
mode = 'up' if i % 2 == 0 else 'low'
modules['GradM{}'.format(i + 1)] = GradientModule(self.dim,
self.width, self.activation, mode)
return modules
def forward(self, input_0):
primals_2 = self.modus.GradM1.params.K
primals_3 = self.modus.GradM1.params.a
primals_4 = self.modus.GradM1.params.b
primals_5 = self.modus.GradM2.params.K
primals_6 = self.modus.GradM2.params.a
primals_7 = self.modus.GradM2.params.b
primals_8 = self.modus.GradM3.params.K
primals_9 = self.modus.GradM3.params.a
primals_10 = self.modus.GradM3.params.b
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| shushu-qin/deeponet | GSympNet | false | 16,448 | ["Apache-2.0"] | 140 | 5bbe066279bba055ad80e04c364140363c87634a | https://github.com/shushu-qin/deeponet/tree/5bbe066279bba055ad80e04c364140363c87634a |
_TransitionUp | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/7u/c7uhkvgwxg2by2iylt2a32rce6cpfi4djnsc523l63tuabi6td67.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp10 & xmask, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x3), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [up], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, primals_4, primals_2, buf1, 512, grid=grid(512), stream=stream0)
del buf0
del primals_2
del primals_4
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.init
class _TransitionUp(nn.Module):
def __init__(self, num_features):
super().__init__()
self.deconv = nn.ConvTranspose2d(num_features, num_features,
kernel_size=3, stride=2, padding=1)
def forward(self, x, skip):
self.deconv.padding = ((x.size(2) - 1) * self.deconv.stride[0] -
skip.size(2) + self.deconv.kernel_size[0] + 1) // 2, ((x.size(3
) - 1) * self.deconv.stride[1] - skip.size(3) + self.deconv.
kernel_size[1] + 1) // 2
up = self.deconv(x, output_size=skip.size())
return torch.cat([up, skip], 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
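# Hedged usage sketch (editor-added): the deconv padding is recomputed on each
# call so the upsampled output matches `skip` spatially; concatenation then
# doubles the channel count.
#   m = _TransitionUp(num_features=4)
#   out = m(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
#   assert out.shape == (4, 8, 4, 4)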
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp10 &
xmask, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(2,
2), padding=(3, 3), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](buf0, primals_4, primals_2, buf1,
512, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
del primals_4
return buf1, primals_1, primals_3
class _TransitionUpNew(nn.Module):
def __init__(self, num_features):
super().__init__()
self.deconv = nn.ConvTranspose2d(num_features, num_features,
kernel_size=3, stride=2, padding=1)
def forward(self, input_0, input_1):
primals_3 = self.deconv.weight
primals_4 = self.deconv.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| simonmeister/pytorch-mono-depth | _TransitionUp | false | 16,449 | ["MIT"] | 56 | 713c70e2fdae6d9d6e0322febadfedcaee9470d3 | https://github.com/simonmeister/pytorch-mono-depth/tree/713c70e2fdae6d9d6e0322febadfedcaee9470d3 |
Downsample | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ud/cudtupp4xbsxvl5czwt3p2pj3cknjnhtp6x45zymsucnyg3xzdnf.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_2, buf0, 16, 16, grid=grid(16, 16), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf0, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 4, 4))
del buf0
buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 16, 16), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
return (reinterpret_tensor(buf2, (4, 1, 1, 4), (4, 4, 4, 1), 0), primals_2, reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch import nn
class Downsample(nn.Module):
def __init__(self, c1, c2, patch_size):
super().__init__()
self.proj = nn.Conv2d(c1, c2, patch_size, patch_size)
def forward(self, x: 'Tensor') ->Tensor:
x = x.permute(0, 3, 1, 2)
x = self.proj(x)
x = x.permute(0, 2, 3, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c1': 4, 'c2': 4, 'patch_size': 4}]
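# Hedged usage sketch (editor-added): input is channels-last [N, H, W, C]; the
# module permutes to NCHW, applies the patch_size-strided conv, and permutes
# back, so a 4x4 patch collapses the 4x4 grid to 1x1.
#   m = Downsample(c1=4, c2=4, patch_size=4)
#   out = m(torch.rand(4, 4, 4, 4))
#   assert out.shape == (4, 1, 1, 4)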
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 16)](primals_2, buf0, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
4, 4, 4), (64, 1, 16, 4), 0), buf0, stride=(4, 4), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 4, 4))
del buf0
buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 16, 16), 0)
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 1, 1, 4), (4, 4, 4, 1), 0
), primals_2, reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1,
16, 4), 0)
class DownsampleNew(nn.Module):
def __init__(self, c1, c2, patch_size):
super().__init__()
self.proj = nn.Conv2d(c1, c2, patch_size, patch_size)
def forward(self, input_0):
primals_1 = self.proj.weight
primals_3 = self.proj.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| sithu31296/image_classification | Downsample | false | 16,450 | ["MIT"] | 57 | 6b8cbce96100225621cee3166a73e852ba216cc3 | https://github.com/sithu31296/image_classification/tree/6b8cbce96100225621cee3166a73e852ba216cc3 |
SAGEConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/mb/cmbvzrmlik5p447ffabi325kbp6eve6vcn7hhpfb6chazehjyegc.py
# Topologically Sorted Source Nodes: [x, x_], Original ATen: [aten.relu, aten.pow]
# Source node to ATen node mapping:
# x => relu
# x_ => pow_1
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_2), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu, 2.0), kwargs = {})
triton_poi_fused_pow_relu_0 = async_compile.triton('triton_poi_fused_pow_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_relu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp4 * tmp4
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/64/c64xfffgdjojgwtqipvz3kjeijszfmu4w6thlvqjcz2kkqydpjhi.py
# Topologically Sorted Source Nodes: [x__1], Original ATen: [aten.pow]
# Source node to ATen node mapping:
# x__1 => pow_2
# Graph fragment:
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mm, 0.5), kwargs = {})
triton_poi_fused_pow_1 = async_compile.triton('triton_poi_fused_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.sqrt(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ou/couousup2n7bhgx7kcqkuwh7icfhsuwbegwhx4ny2cpqpxbc6bat.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_2 => add
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_6), kwargs = {})
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_8), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_tensor_1, %add_tensor), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_], Original ATen: [aten.relu, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_pow_relu_0.run(buf1, primals_2, buf2, 16, grid=grid(16), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_, spmm], Original ATen: [aten.pow, aten.mm]
extern_kernels.mm(primals_4, buf2, out=buf3)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x__1], Original ATen: [aten.pow]
triton_poi_fused_pow_1.run(buf3, buf4, 16, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf6)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf7, primals_6, buf6, primals_8, 16, grid=grid(16), stream=stream0)
del buf6
del primals_6
del primals_8
return (buf7, primals_3, buf1, buf3, buf4, primals_7, primals_5, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class SAGEConv(nn.Module):
"""
Description
-----------
SAGE convolutional layer.
Parameters
----------
in_features : int
Dimension of input features.
pool_features : int
Dimension of pooling features.
out_features : int
Dimension of output features.
activation : func of torch.nn.functional, optional
Activation function. Default: ``None``.
dropout : float, optional
Rate of dropout. Default: ``0.0``.
mu : float, optional
        Hyperparameter of the aggregation; refer to the original paper. Default: ``2.0``.
"""
    def __init__(self, in_features, pool_features, out_features,
            activation=None, mu=2.0, dropout=0.0):
super(SAGEConv, self).__init__()
self.pool_layer = nn.Linear(in_features, pool_features)
self.linear1 = nn.Linear(pool_features, out_features)
self.linear2 = nn.Linear(pool_features, out_features)
self.activation = activation
self.mu = mu
if dropout > 0.0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.reset_parameters()
def reset_parameters(self):
"""Reset parameters."""
if self.activation == F.leaky_relu:
gain = nn.init.calculate_gain('leaky_relu')
else:
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.linear1.weight, gain=gain)
nn.init.xavier_normal_(self.linear2.weight, gain=gain)
nn.init.xavier_normal_(self.pool_layer.weight, gain=gain)
def forward(self, x, adj):
"""
Parameters
----------
x : torch.Tensor
Tensor of input features.
adj : torch.SparseTensor
Sparse tensor of adjacency matrix.
Returns
-------
x : torch.Tensor
Output of layer.
"""
x = F.relu(self.pool_layer(x))
x_ = x ** self.mu
x_ = torch.spmm(adj, x_) ** (1 / self.mu)
x = self.linear1(x)
x_ = self.linear2(x_)
x = x + x_
if self.activation is not None:
x = self.activation(x)
if self.dropout is not None:
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'pool_features': 4, 'out_features': 4}]
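# A minimal sanity sketch (illustrative, not part of the layer): with the
# default mu=2.0 the aggregation (adj @ x ** mu) ** (1 / mu) reduces to
# sqrt(adj @ x ** 2), which is exactly what the compiled kernels specialize
# to (a square, an mm, then a square root). Dense `adj` is used here purely
# for illustration.
def _check_power_mean_aggregation():
    torch.manual_seed(0)
    x = torch.relu(torch.rand(4, 4))  # non-negative, as after the pool ReLU
    adj = torch.rand(4, 4)
    mu = 2.0
    generic = (adj @ x ** mu) ** (1 / mu)
    specialized = torch.sqrt(adj @ x ** 2)
    assert torch.allclose(generic, specialized)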
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
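# The kernel below fuses the pool_layer bias add with ReLU and the mu=2
# power: relu(x + b) is written back in place (reused by linear1) and
# relu(x + b) ** 2 goes to a second buffer for the aggregation.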
@triton.jit
def triton_poi_fused_pow_relu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp4 * tmp4
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp5, xmask)
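# With the default mu=2.0, x ** (1 / mu) specializes at trace time to a
# plain element-wise square root.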
@triton.jit
def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.sqrt(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
del primals_1
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_relu_0[grid(16)](buf1, primals_2, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_4, buf2, out=buf3)
buf4 = buf2
del buf2
triton_poi_fused_pow_1[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (4, 4), (1, 4
), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4
), 0), out=buf6)
buf7 = buf5
del buf5
triton_poi_fused_add_2[grid(16)](buf7, primals_6, buf6, primals_8,
16, XBLOCK=16, num_warps=1, num_stages=1)
del buf6
del primals_6
del primals_8
return (buf7, primals_3, buf1, buf3, buf4, primals_7, primals_5,
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0))
class SAGEConvNew(nn.Module):
"""
Description
-----------
SAGE convolutional layer.
Parameters
----------
in_features : int
Dimension of input features.
pool_features : int
Dimension of pooling features.
out_features : int
Dimension of output features.
activation : func of torch.nn.functional, optional
Activation function. Default: ``None``.
dropout : float, optional
Rate of dropout. Default: ``0.0``.
mu : float, optional
        Hyperparameter of the aggregation; refer to the original paper. Default: ``2.0``.
"""
    def __init__(self, in_features, pool_features, out_features,
            activation=None, mu=2.0, dropout=0.0):
super(SAGEConvNew, self).__init__()
self.pool_layer = nn.Linear(in_features, pool_features)
self.linear1 = nn.Linear(pool_features, out_features)
self.linear2 = nn.Linear(pool_features, out_features)
self.activation = activation
self.mu = mu
if dropout > 0.0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.reset_parameters()
def reset_parameters(self):
"""Reset parameters."""
if self.activation == F.leaky_relu:
gain = nn.init.calculate_gain('leaky_relu')
else:
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.linear1.weight, gain=gain)
nn.init.xavier_normal_(self.linear2.weight, gain=gain)
nn.init.xavier_normal_(self.pool_layer.weight, gain=gain)
def forward(self, input_0, input_1):
primals_1 = self.pool_layer.weight
primals_2 = self.pool_layer.bias
        primals_5 = self.linear1.weight
        primals_6 = self.linear1.bias
        primals_7 = self.linear2.weight
        primals_8 = self.linear2.bias
        primals_3 = input_0
        primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
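# Hedged usage sketch (illustrative): the compiled wrapper consumes the same
# (features, adjacency) pair as the eager layer, here as dense 4x4 CUDA
# tensors matching the size/stride guards in `call`. Requires a CUDA device.
if __name__ == '__main__':
    layer = SAGEConvNew(in_features=4, pool_features=4, out_features=4).cuda()
    out = layer(torch.rand(4, 4, device='cuda'), torch.rand(4, 4, device='cuda'))
    print(out.shape)  # torch.Size([4, 4])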
| sigeisler/grb | SAGEConv | false | 16,451 | [
"MIT"
] | 51 | c89e21076dc05d1edb87dfe2eff20c29ba6bd0c1 | https://github.com/sigeisler/grb/tree/c89e21076dc05d1edb87dfe2eff20c29ba6bd0c1 |
GCNConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/l4/cl4boort6vfsvh6h6bfd4lck36kbmtipkqcrnhckuuxer6sfib77.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.zeros]
# Source node to ATen node mapping:
# x_1 => full
# Graph fragment:
# %full : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_zeros_0 = async_compile.triton('triton_poi_fused_zeros_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.zeros]
stream0 = get_raw_stream(0)
triton_poi_fused_zeros_0.run(buf1, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._sparse_addmm]
buf2 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf1, (4, 4), (1, 4), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), beta=0)
del buf0
del buf1
buf3 = buf2
del buf2
return (reinterpret_tensor(buf3, (4, 4), (1, 4), 0), primals_3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class GCNConv(nn.Module):
"""
Description
-----------
GCN convolutional layer.
Parameters
----------
in_features : int
Dimension of input features.
out_features : int
Dimension of output features.
activation : func of torch.nn.functional, optional
Activation function. Default: ``None``.
residual : bool, optional
Whether to use residual connection. Default: ``False``.
dropout : float, optional
Dropout rate during training. Default: ``0.0``.
"""
    def __init__(self, in_features, out_features, activation=None,
            residual=False, dropout=0.0):
super(GCNConv, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.linear = nn.Linear(in_features, out_features)
if residual:
self.residual = nn.Linear(in_features, out_features)
else:
self.residual = None
self.activation = activation
if dropout > 0.0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.reset_parameters()
def reset_parameters(self):
"""Reset parameters."""
if self.activation == F.leaky_relu:
gain = nn.init.calculate_gain('leaky_relu')
else:
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.linear.weight, gain=gain)
def forward(self, x, adj):
"""
Parameters
----------
x : torch.Tensor
Tensor of input features.
adj : torch.SparseTensor
Sparse tensor of adjacency matrix.
Returns
-------
x : torch.Tensor
Output of layer.
"""
x = self.linear(x)
x = torch.sparse.mm(adj, x)
if self.activation is not None:
x = self.activation(x)
if self.residual is not None:
x = x + self.residual(x)
if self.dropout is not None:
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
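# Hedged usage sketch (illustrative): `forward` calls torch.sparse.mm, so a
# dense adjacency is converted with .to_sparse() before being passed in.
def _gcn_example():
    layer = GCNConv(in_features=4, out_features=4, activation=F.relu)
    x = torch.rand(4, 4)
    adj = (torch.rand(4, 4) > 0.5).float().to_sparse()
    return layer(x, adj)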
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
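        # With beta=0 the zero-filled buffer contributes nothing, so this is
        # effectively torch.sparse.mm(adj, linear(x)) computed on transposed
        # views; the reinterpret_tensor in the return below views the result
        # back in row-major order.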
buf2 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf1,
(4, 4), (1, 4), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), beta=0)
del buf0
del buf1
buf3 = buf2
del buf2
return reinterpret_tensor(buf3, (4, 4), (1, 4), 0), primals_3, primals_4
class GCNConvNew(nn.Module):
"""
Description
-----------
GCN convolutional layer.
Parameters
----------
in_features : int
Dimension of input features.
out_features : int
Dimension of output features.
activation : func of torch.nn.functional, optional
Activation function. Default: ``None``.
residual : bool, optional
Whether to use residual connection. Default: ``False``.
dropout : float, optional
Dropout rate during training. Default: ``0.0``.
"""
    def __init__(self, in_features, out_features, activation=None,
            residual=False, dropout=0.0):
super(GCNConvNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.linear = nn.Linear(in_features, out_features)
if residual:
self.residual = nn.Linear(in_features, out_features)
else:
self.residual = None
self.activation = activation
if dropout > 0.0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.reset_parameters()
def reset_parameters(self):
"""Reset parameters."""
if self.activation == F.leaky_relu:
gain = nn.init.calculate_gain('leaky_relu')
else:
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.linear.weight, gain=gain)
def forward(self, input_0, input_1):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| sigeisler/grb | GCNConv | false | 16,452 | [
"MIT"
] | 51 | c89e21076dc05d1edb87dfe2eff20c29ba6bd0c1 | https://github.com/sigeisler/grb/tree/c89e21076dc05d1edb87dfe2eff20c29ba6bd0c1 |
localSubNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xf/cxfcqgqzkxpafh7ngi5lmyvtgswx4bwrz3upkkjlnp6wzixzpn5k.py
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# input_1 => convolution
# input_2 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wv/cwvlgiiynchbeju4rilglrk4ynrtgcixgjkdwocknt4i3bammlvv.py
# Topologically Sorted Source Nodes: [input_31, input_32, local_layer], Original ATen: [aten.convolution, aten.tanh, aten.mul]
# Source node to ATen node mapping:
# input_31 => convolution_15
# input_32 => tanh
# local_layer => mul_15
# Graph fragment:
# %convolution_15 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%where_14, %primals_32, %primals_33, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_15,), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, 0.25), kwargs = {})
triton_poi_fused_convolution_mul_tanh_1 = async_compile.triton('triton_poi_fused_convolution_mul_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_tanh_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + (x3), tmp2, None)
tl.store(out_ptr0 + (x3), tmp5, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (32, ), (1, ))
assert_size_stride(primals_14, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_15, (32, ), (1, ))
assert_size_stride(primals_16, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_17, (32, ), (1, ))
assert_size_stride(primals_18, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_19, (32, ), (1, ))
assert_size_stride(primals_20, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_21, (32, ), (1, ))
assert_size_stride(primals_22, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_23, (32, ), (1, ))
assert_size_stride(primals_24, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_25, (32, ), (1, ))
assert_size_stride(primals_26, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_27, (32, ), (1, ))
assert_size_stride(primals_28, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_29, (32, ), (1, ))
assert_size_stride(primals_30, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_31, (32, ), (1, ))
assert_size_stride(primals_32, (3, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_33, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 524288, grid=grid(524288), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf5 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 524288, grid=grid(524288), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf8 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf6, primals_7, buf7, buf8, 524288, grid=grid(524288), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf10 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf11 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [input_7, input_8], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf9, primals_9, buf10, buf11, 524288, grid=grid(524288), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf13 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf14 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [input_9, input_10], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf12, primals_11, buf13, buf14, 524288, grid=grid(524288), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf16 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf17 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf15, primals_13, buf16, buf17, 524288, grid=grid(524288), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf19 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf20 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [input_13, input_14], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf18, primals_15, buf19, buf20, 524288, grid=grid(524288), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [input_15], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf20, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf22 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf23 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [input_15, input_16], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf21, primals_17, buf22, buf23, 524288, grid=grid(524288), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf25 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf26 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [input_17, input_18], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf24, primals_19, buf25, buf26, 524288, grid=grid(524288), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.convolution]
buf27 = extern_kernels.convolution(buf26, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf28 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf29 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [input_19, input_20], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf27, primals_21, buf28, buf29, 524288, grid=grid(524288), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [input_21], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf31 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf32 = buf27; del buf27 # reuse
# Topologically Sorted Source Nodes: [input_21, input_22], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf30, primals_23, buf31, buf32, 524288, grid=grid(524288), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution]
buf33 = extern_kernels.convolution(buf32, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf34 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf35 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [input_23, input_24], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf33, primals_25, buf34, buf35, 524288, grid=grid(524288), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [input_25], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf35, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf37 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf38 = buf33; del buf33 # reuse
# Topologically Sorted Source Nodes: [input_25, input_26], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf36, primals_27, buf37, buf38, 524288, grid=grid(524288), stream=stream0)
del primals_27
# Topologically Sorted Source Nodes: [input_27], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf38, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf40 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf41 = buf36; del buf36 # reuse
# Topologically Sorted Source Nodes: [input_27, input_28], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf39, primals_29, buf40, buf41, 524288, grid=grid(524288), stream=stream0)
del primals_29
# Topologically Sorted Source Nodes: [input_29], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf41, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf43 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf44 = buf39; del buf39 # reuse
# Topologically Sorted Source Nodes: [input_29, input_30], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf42, primals_31, buf43, buf44, 524288, grid=grid(524288), stream=stream0)
del buf42
del primals_31
# Topologically Sorted Source Nodes: [input_31], Original ATen: [aten.convolution]
buf45 = extern_kernels.convolution(buf44, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf46 = buf45; del buf45 # reuse
buf47 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_31, input_32, local_layer], Original ATen: [aten.convolution, aten.tanh, aten.mul]
triton_poi_fused_convolution_mul_tanh_1.run(buf46, primals_33, buf47, 49152, grid=grid(49152), stream=stream0)
del primals_33
return (buf47, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, buf13, buf14, buf16, buf17, buf19, buf20, buf22, buf23, buf25, buf26, buf28, buf29, buf31, buf32, buf34, buf35, buf37, buf38, buf40, buf41, buf43, buf44, buf46, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((3, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class localSubNet(nn.Module):
def __init__(self, blockDepth=16, convDepth=32, scale=0.25):
super(localSubNet, self).__init__()
self.blockDepth = blockDepth
self.convDepth = convDepth
self.scale = scale
self.net = torch.nn.Sequential()
for i in range(self.blockDepth):
if i != self.blockDepth - 1:
if i == 0:
conv = torch.nn.Conv2d(3, self.convDepth, 3, padding=1)
torch.nn.init.kaiming_normal_(conv.weight)
torch.nn.init.zeros_(conv.bias)
else:
conv = torch.nn.Conv2d(self.convDepth, self.convDepth,
3, padding=1)
torch.nn.init.kaiming_normal_(conv.weight)
torch.nn.init.zeros_(conv.bias)
self.net.add_module('conv%d' % i, conv)
self.net.add_module('leakyRelu%d' % i, torch.nn.LeakyReLU(
inplace=False))
else:
conv = torch.nn.Conv2d(self.convDepth, 3, 3, padding=1)
torch.nn.init.kaiming_normal_(conv.weight)
torch.nn.init.zeros_(conv.bias)
self.net.add_module('conv%d' % i, conv)
self.net.add_module('tanh-out', torch.nn.Tanh())
def forward(self, x):
local_layer = self.net(x) * self.scale
return local_layer
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
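# Hedged usage sketch (illustrative): the network predicts a bounded
# correction; tanh followed by the 0.25 scale keeps every output value
# within [-scale, scale].
def _local_subnet_example():
    net = localSubNet()
    frame = torch.rand(4, 3, 64, 64)
    delta = net(frame)
    assert delta.shape == frame.shape
    assert delta.abs().max() <= 0.25
    return delta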
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
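# The kernel below fuses the conv bias add with LeakyReLU(0.01), emitting
# both the activated tensor and the boolean mask (x + b > 0) that the
# backward pass reuses.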
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
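# The kernel below fuses the final conv bias add with tanh and the module's
# scale factor 0.25, keeping the pre-activation (x + b) in place for
# backward.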
@triton.jit
def triton_poi_fused_convolution_mul_tanh_1(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp5, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32, primals_33
) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (32,), (1,))
assert_size_stride(primals_14, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_15, (32,), (1,))
assert_size_stride(primals_16, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_17, (32,), (1,))
assert_size_stride(primals_18, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_19, (32,), (1,))
assert_size_stride(primals_20, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_21, (32,), (1,))
assert_size_stride(primals_22, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_23, (32,), (1,))
assert_size_stride(primals_24, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_25, (32,), (1,))
assert_size_stride(primals_26, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_27, (32,), (1,))
assert_size_stride(primals_28, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_29, (32,), (1,))
assert_size_stride(primals_30, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_31, (32,), (1,))
assert_size_stride(primals_32, (3, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_33, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf0,
primals_2, buf1, buf2, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf5 = buf0
del buf0
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf3,
primals_5, buf4, buf5, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf8 = buf3
del buf3
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf6,
primals_7, buf7, buf8, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_7
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf10 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf11 = buf6
del buf6
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf9,
primals_9, buf10, buf11, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_9
buf12 = extern_kernels.convolution(buf11, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf13 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf14 = buf9
del buf9
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf12,
primals_11, buf13, buf14, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_11
buf15 = extern_kernels.convolution(buf14, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf16 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf17 = buf12
del buf12
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf15,
primals_13, buf16, buf17, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_13
buf18 = extern_kernels.convolution(buf17, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf19 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf20 = buf15
del buf15
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf18,
primals_15, buf19, buf20, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_15
buf21 = extern_kernels.convolution(buf20, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf22 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf23 = buf18
del buf18
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf21,
primals_17, buf22, buf23, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_17
buf24 = extern_kernels.convolution(buf23, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf25 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf26 = buf21
del buf21
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf24,
primals_19, buf25, buf26, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_19
buf27 = extern_kernels.convolution(buf26, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf28 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf29 = buf24
del buf24
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf27,
primals_21, buf28, buf29, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_21
buf30 = extern_kernels.convolution(buf29, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf31 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf32 = buf27
del buf27
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf30,
primals_23, buf31, buf32, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_23
buf33 = extern_kernels.convolution(buf32, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf34 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf35 = buf30
del buf30
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf33,
primals_25, buf34, buf35, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_25
buf36 = extern_kernels.convolution(buf35, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf37 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf38 = buf33
del buf33
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf36,
primals_27, buf37, buf38, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_27
buf39 = extern_kernels.convolution(buf38, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf40 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf41 = buf36
del buf36
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf39,
primals_29, buf40, buf41, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_29
buf42 = extern_kernels.convolution(buf41, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf43 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf44 = buf39
del buf39
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf42,
primals_31, buf43, buf44, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf42
del primals_31
buf45 = extern_kernels.convolution(buf44, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf46 = buf45
del buf45
buf47 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_mul_tanh_1[grid(49152)](buf46,
primals_33, buf47, 49152, XBLOCK=256, num_warps=4, num_stages=1)
del primals_33
return (buf47, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, primals_32, buf1, buf2, buf4, buf5, buf7, buf8, buf10,
buf11, buf13, buf14, buf16, buf17, buf19, buf20, buf22, buf23,
buf25, buf26, buf28, buf29, buf31, buf32, buf34, buf35, buf37,
buf38, buf40, buf41, buf43, buf44, buf46)
class localSubNetNew(nn.Module):
def __init__(self, blockDepth=16, convDepth=32, scale=0.25):
super(localSubNetNew, self).__init__()
self.blockDepth = blockDepth
self.convDepth = convDepth
self.scale = scale
self.net = torch.nn.Sequential()
for i in range(self.blockDepth):
if i != self.blockDepth - 1:
if i == 0:
conv = torch.nn.Conv2d(3, self.convDepth, 3, padding=1)
torch.nn.init.kaiming_normal_(conv.weight)
torch.nn.init.zeros_(conv.bias)
else:
conv = torch.nn.Conv2d(self.convDepth, self.convDepth,
3, padding=1)
torch.nn.init.kaiming_normal_(conv.weight)
torch.nn.init.zeros_(conv.bias)
self.net.add_module('conv%d' % i, conv)
self.net.add_module('leakyRelu%d' % i, torch.nn.LeakyReLU(
inplace=False))
else:
conv = torch.nn.Conv2d(self.convDepth, 3, 3, padding=1)
torch.nn.init.kaiming_normal_(conv.weight)
torch.nn.init.zeros_(conv.bias)
self.net.add_module('conv%d' % i, conv)
self.net.add_module('tanh-out', torch.nn.Tanh())
def forward(self, input_0):
primals_1 = self.net.conv0.weight
primals_2 = self.net.conv0.bias
primals_4 = self.net.conv1.weight
primals_5 = self.net.conv1.bias
primals_6 = self.net.conv2.weight
primals_7 = self.net.conv2.bias
primals_8 = self.net.conv3.weight
primals_9 = self.net.conv3.bias
primals_10 = self.net.conv4.weight
primals_11 = self.net.conv4.bias
primals_12 = self.net.conv5.weight
primals_13 = self.net.conv5.bias
primals_14 = self.net.conv6.weight
primals_15 = self.net.conv6.bias
primals_16 = self.net.conv7.weight
primals_17 = self.net.conv7.bias
primals_18 = self.net.conv8.weight
primals_19 = self.net.conv8.bias
primals_20 = self.net.conv9.weight
primals_21 = self.net.conv9.bias
primals_22 = self.net.conv10.weight
primals_23 = self.net.conv10.bias
primals_24 = self.net.conv11.weight
primals_25 = self.net.conv11.bias
primals_26 = self.net.conv12.weight
primals_27 = self.net.conv12.bias
primals_28 = self.net.conv13.weight
primals_29 = self.net.conv13.bias
primals_30 = self.net.conv14.weight
primals_31 = self.net.conv14.bias
primals_32 = self.net.conv15.weight
primals_33 = self.net.conv15.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33])
return output[0]
| shir-barzel-healthy/CIE_XYZ_NET | localSubNet | false | 16,453 | ["MIT"] | 64 | 9aabf5222dd81efa518233340dc3313177927e27 | https://github.com/shir-barzel-healthy/CIE_XYZ_NET/tree/9aabf5222dd81efa518233340dc3313177927e27 |
GRUStep | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/c4/cc4khg7fwbxxm2fufox7nnkf4gfybrmj5ir2tx3zuxfioc5b2dya.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5b/c5bkw2jxfpnk3o5xqifvptqcde6oukvmpsxncnrr4hbmq6dbwwvm.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl.load(in_ptr1 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp4, tmp10, tmp14)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tn/ctn3hks2zjt4xmjlguifn25zx6whcel77cdlsr33hdq2oyumfsoc.py
# Topologically Sorted Source Nodes: [z, t, sub, mul_1, mul_2, h_state], Original ATen: [aten.sigmoid, aten.tanh, aten.rsub, aten.mul, aten.add]
# Source node to ATen node mapping:
# h_state => add
# mul_1 => mul_1
# mul_2 => mul_2
# sub => sub
# t => tanh
# z => sigmoid
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask)
tmp6 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp5 = tmp3 * tmp4
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp1 * tmp7
tmp9 = tmp5 + tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf2, primals_1, primals_2, buf3, 512, grid=grid(512), stream=stream0)
del primals_2
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [z, t, sub, mul_1, mul_2, h_state], Original ATen: [aten.sigmoid, aten.tanh, aten.rsub, aten.mul, aten.add]
triton_poi_fused_add_mul_rsub_sigmoid_tanh_2.run(buf1, primals_1, buf4, buf5, 256, grid=grid(256), stream=stream0)
return (buf5, primals_1, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf1, buf2, reinterpret_tensor(buf3, (64, 8), (8, 1), 0), buf4, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GRUStep(nn.Module):
def __init__(self, hidden_size, input_size):
super(GRUStep, self).__init__()
"""GRU module"""
self.linear_z = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_r = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_t = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
def forward(self, h_state, input_):
z = torch.sigmoid(self.linear_z(torch.cat([h_state, input_], -1)))
r = torch.sigmoid(self.linear_r(torch.cat([h_state, input_], -1)))
t = torch.tanh(self.linear_t(torch.cat([r * h_state, input_], -1)))
h_state = (1 - z) * h_state + z * t
return h_state
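# Gate equations computed above (standard GRU, bias-free linear layers):
#   z = sigmoid(W_z [h; x])        # update gate
#   r = sigmoid(W_r [h; x])        # reset gate
#   t = tanh(W_t [r * h; x])       # candidate state
#   h' = (1 - z) * h + z * t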
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'input_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
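# The kernel above fuses torch.cat([h_state, input_], -1): output positions with
# x0 < 4 are read from h_state (in_ptr0), the rest from input_ (in_ptr1) at an
# offset of -4 along the concatenated last dimension.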
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp6 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp4, tmp10, tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
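# Same concat pattern as triton_poi_fused_cat_0, but the first half is
# sigmoid(r) * h_state computed inline (tmp6 * tmp7), i.e. it builds
# torch.cat([r * h_state, input_], -1) without materializing r separately.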
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp5 = tmp3 * tmp4
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp1 * tmp7
tmp9 = tmp5 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
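# Elementwise GRU combine: with z = sigmoid(in_ptr0), h = in_ptr1 and
# t = tanh(in_ptr2), the kernel writes (1 - z) * h + z * t, matching
# h_state = (1 - z) * h_state + z * t in the eager forward.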
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
triton_poi_fused_cat_1[grid(512)](buf2, primals_1, primals_2, buf3,
512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_2[grid(256)](buf1,
primals_1, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf5, primals_1, reinterpret_tensor(buf0, (64, 8), (8, 1), 0
), buf1, buf2, reinterpret_tensor(buf3, (64, 8), (8, 1), 0
), buf4, primals_5
class GRUStepNew(nn.Module):
def __init__(self, hidden_size, input_size):
super(GRUStepNew, self).__init__()
"""GRU module"""
self.linear_z = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_r = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_t = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
def forward(self, input_0, input_1):
primals_3 = self.linear_z.weight
primals_4 = self.linear_r.weight
primals_5 = self.linear_t.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| siyangZhao/BAMnet | GRUStep | false | 16,454 | ["Apache-2.0"] | 170 | 4c6222610c120a4a114daf40938219ea0ca57dc6 | https://github.com/siyangZhao/BAMnet/tree/4c6222610c120a4a114daf40938219ea0ca57dc6 |
AgreementRouting | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/um/cum65j23qchrjf5dndblqgbw6zomhgwfj2obfidtgy7b5j3zwklm.py
# Topologically Sorted Source Nodes: [c], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# c => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%primals_2, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
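# Together with triton_poi_fused__softmax_1 below, this pair implements a
# numerically stable softmax over rows of b: subtract the row max and
# exponentiate here, then normalize by the row sum in the second kernel.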
# kernel path: runs/run_shard_0/inductor_cache/wk/cwk2wao7opapqbjj7klnqrd6tgist3ts3nc5veryzhzstwpx7d4l.py
# Topologically Sorted Source Nodes: [c], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# c => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/aj/cajnfn4dfbc6usnkqebmhnks35kkrodsrc3gynfsb56b5tcpnit4.py
# Topologically Sorted Source Nodes: [mul, s], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# s => sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %primals_1), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mul_sum_2 = async_compile.triton('triton_poi_fused_mul_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3 + (64*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x3 + (64*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x3 + (64*x2)), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x3 + (64*x2)), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')
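# Routing weighted sum s_j = sum_i c_ij * u_hat_ij: in_ptr0 holds the coupling
# coefficients c (shared across the batch in this first iteration) and in_ptr1
# the predicted vectors; the sum over the four input capsules is unrolled into
# the tmp2..tmp14 chain.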
# kernel path: runs/run_shard_0/inductor_cache/6c/c6cxitoinplot2lb2nsqjxcfvoeulhjikauaic44zgpmnhn6m6dm.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# x => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, %view), kwargs = {})
triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 + tmp12
tmp14 = tmp11 / tmp13
tmp15 = libdevice.sqrt(tmp11)
tmp16 = tmp14 / tmp15
tmp17 = tmp0 * tmp16
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
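# The kernel above is the capsule "squash" nonlinearity along the last dim of
# size 4: with n2 = |s|^2 accumulated in tmp11, it computes
#   v = (n2 / (1 + n2)) * s / sqrt(n2)
# i.e. each element of s is scaled by tmp14 / tmp15.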
# kernel path: runs/run_shard_0/inductor_cache/tz/ctzkrdbwmdk4havssadppcim4uiewy6wtjyndwqkx4keavz6xu6j.py
# Topologically Sorted Source Nodes: [mul_2, sum_3, b_batch_1], Original ATen: [aten.mul, aten.sum, aten.add]
# Source node to ATen node mapping:
# b_batch_1 => add_1
# mul_2 => mul_2
# sum_3 => sum_4
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %unsqueeze_1), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %sum_4), kwargs = {})
triton_poi_fused_add_mul_sum_4 = async_compile.triton('triton_poi_fused_add_mul_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x4 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x4)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x4)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x4)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp6 = tmp4 * tmp5
tmp7 = tmp3 + tmp6
tmp10 = tmp8 * tmp9
tmp11 = tmp7 + tmp10
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = tmp0 + tmp15
tl.store(out_ptr0 + (x4), tmp16, xmask)
''', device_str='cuda')
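# First routing-logit update: the shared prior b (in_ptr0, indexed by x3 only,
# so broadcast over the batch) is incremented by the dot product
# <u_hat_ij, v_j> over the capsule dimension, yielding per-batch logits b_batch.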
# kernel path: runs/run_shard_0/inductor_cache/j7/cj7mv2k5l2kigfluq2rwwpouckm4oow7jia7wwvjogp3qlr23xwv.py
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax_1 => amax_1, exp_1, sub_1
# Graph fragment:
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/b4/cb4vdtviarmy2ckmxjdkc3dnwp4gssl3dh4u4col3s2illvdpvql.py
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax_1 => div_3, sum_5
# Graph fragment:
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_5), kwargs = {})
triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hk/chkymnmrtvm26agoxolleunayy4vpu2ntpnerpf3vgesh7nmasco.py
# Topologically Sorted Source Nodes: [mul_3, s_1], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul_3 => mul_3
# s_1 => sum_6
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %primals_1), kwargs = {})
# %sum_6 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [1]), kwargs = {})
triton_poi_fused_mul_sum_7 = async_compile.triton('triton_poi_fused_mul_sum_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3 + (64*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x3 + (64*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x3 + (64*x2)), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x3 + (64*x2)), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wy/cwy6nwovy7yqtkn7mqqun5gnuovqjt4rhesigmga3qlwtxqxvuoo.py
# Topologically Sorted Source Nodes: [mul_5, sum_6, b_batch_2], Original ATen: [aten.mul, aten.sum, aten.add]
# Source node to ATen node mapping:
# b_batch_2 => add_3
# mul_5 => mul_5
# sum_6 => sum_8
# Graph fragment:
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %unsqueeze_2), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [-1]), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %sum_8), kwargs = {})
triton_poi_fused_add_mul_sum_8 = async_compile.triton('triton_poi_fused_add_mul_sum_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sum_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sum_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp6 = tmp4 * tmp5
tmp7 = tmp3 + tmp6
tmp10 = tmp8 * tmp9
tmp11 = tmp7 + tmp10
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = tmp0 + tmp15
tl.store(in_out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [c], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [c], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf0, buf1, 16, grid=grid(16), stream=stream0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, s], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_2.run(buf1, primals_1, buf2, 64, grid=grid(64), stream=stream0)
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mul]
triton_poi_fused_mul_3.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [mul_2, sum_3, b_batch_1], Original ATen: [aten.mul, aten.sum, aten.add]
triton_poi_fused_add_mul_sum_4.run(primals_2, primals_1, buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_6.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [mul_3, s_1], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_7.run(buf6, primals_1, buf7, 64, grid=grid(64), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mul]
triton_poi_fused_mul_3.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [mul_5, sum_6, b_batch_2], Original ATen: [aten.mul, aten.sum, aten.add]
triton_poi_fused_add_mul_sum_8.run(buf9, primals_1, buf8, 64, grid=grid(64), stream=stream0)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf9, buf10, 64, grid=grid(64), stream=stream0)
buf11 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_6.run(buf10, buf11, 64, grid=grid(64), stream=stream0)
buf12 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [mul_6, s_2], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_7.run(buf11, primals_1, buf12, 64, grid=grid(64), stream=stream0)
buf13 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mul]
triton_poi_fused_mul_3.run(buf12, buf13, 64, grid=grid(64), stream=stream0)
buf14 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [mul_8, sum_9, b_batch_3], Original ATen: [aten.mul, aten.sum, aten.add]
triton_poi_fused_add_mul_sum_8.run(buf14, primals_1, buf13, 64, grid=grid(64), stream=stream0)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf14, buf15, 64, grid=grid(64), stream=stream0)
buf16 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_6.run(buf15, buf16, 64, grid=grid(64), stream=stream0)
buf17 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0); del buf15 # reuse
# Topologically Sorted Source Nodes: [mul_9, s_3], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_7.run(buf16, primals_1, buf17, 64, grid=grid(64), stream=stream0)
buf18 = reinterpret_tensor(buf16, (4, 4, 4), (16, 4, 1), 0); del buf16 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.mul]
triton_poi_fused_mul_3.run(buf17, buf18, 64, grid=grid(64), stream=stream0)
del buf17
buf19 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [mul_11, sum_12, b_batch_4], Original ATen: [aten.mul, aten.sum, aten.add]
triton_poi_fused_add_mul_sum_8.run(buf19, primals_1, buf18, 64, grid=grid(64), stream=stream0)
buf20 = reinterpret_tensor(buf18, (16, 4), (4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [softmax_4], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf19, buf20, 64, grid=grid(64), stream=stream0)
buf21 = reinterpret_tensor(buf19, (16, 4), (4, 1), 0); del buf19 # reuse
# Topologically Sorted Source Nodes: [softmax_4], Original ATen: [aten._softmax]
triton_poi_fused__softmax_6.run(buf20, buf21, 64, grid=grid(64), stream=stream0)
buf22 = reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0); del buf20 # reuse
# Topologically Sorted Source Nodes: [mul_12, s_4], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_7.run(buf21, primals_1, buf22, 64, grid=grid(64), stream=stream0)
buf23 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0); del buf21 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.mul]
triton_poi_fused_mul_3.run(buf22, buf23, 64, grid=grid(64), stream=stream0)
del buf22
return (buf23, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def squash(x):
    # Capsule squashing non-linearity: v = (|s|^2 / (1 + |s|^2)) * (s / |s|),
    # applied to each capsule vector along dim=2.
    lengths2 = x.pow(2).sum(dim=2)
    lengths = lengths2.sqrt()
    x = x * (lengths2 / (1 + lengths2) / lengths).view(x.size(0), x.size(1), 1)
    return x
class AgreementRouting(nn.Module):
def __init__(self, input_caps, output_caps, n_iterations):
super(AgreementRouting, self).__init__()
self.n_iterations = n_iterations
self.b = nn.Parameter(torch.zeros((input_caps, output_caps)))
    def forward(self, u_predict):
        # u_predict: (batch, input_caps, output_caps, output_dim) prediction vectors.
        batch_size, input_caps, output_caps, _output_dim = u_predict.size()
        # Coupling coefficients from the shared routing logits b.
        c = F.softmax(self.b, dim=1)
        s = (c.unsqueeze(2) * u_predict).sum(dim=1)
        v = squash(s)
        if self.n_iterations > 0:
            b_batch = self.b.expand((batch_size, input_caps, output_caps))
            for r in range(self.n_iterations):
                v = v.unsqueeze(1)
                # Agreement update: raise logits where predictions align with v.
                b_batch = b_batch + (u_predict * v).sum(-1)
                c = F.softmax(b_batch.view(-1, output_caps), dim=1).view(-1,
                    input_caps, output_caps, 1)
                s = (c * u_predict).sum(dim=1)
                v = squash(s)
        return v
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_caps': 4, 'output_caps': 4, 'n_iterations': 4}]
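def example_routing():
    # Illustrative usage sketch (not part of the original file), assuming the
    # shapes from get_inputs/get_init_inputs above.
    routing = AgreementRouting(input_caps=4, output_caps=4, n_iterations=4)
    u_predict = torch.rand(4, 4, 4, 4)  # (batch, input_caps, output_caps, output_dim)
    return routing(u_predict)  # squashed output capsules, shape (4, 4, 4)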
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
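# Softmax pass 1 over each length-4 row of the routing logits b: subtract the
# row max and exponentiate (numerically stable softmax).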
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
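# Softmax pass 2: normalise by the row sum to obtain the coupling coefficients c.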
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
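# Coupling-weighted sum over input capsules:
# s[b, j, :] = sum_i c[i, j] * u_predict[b, i, j, :] (the i-sum is unrolled into four terms).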
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3 + 64 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x3 + 64 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x3 + 64 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x3 + 64 * x2), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
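# Fused squash non-linearity: scales each length-4 capsule vector s by
# |s|^2 / (1 + |s|^2) / |s|.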
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 + tmp12
tmp14 = tmp11 / tmp13
tmp15 = libdevice.sqrt(tmp11)
tmp16 = tmp14 / tmp15
tmp17 = tmp0 * tmp16
tl.store(out_ptr0 + x2, tmp17, xmask)
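# First routing update:
# b_batch[b, i, j] = b[i, j] + sum_d u_predict[b, i, j, d] * v[b, j, d].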
@triton.jit
def triton_poi_fused_add_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x4 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x4), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x4), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr2 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp6 = tmp4 * tmp5
tmp7 = tmp3 + tmp6
tmp10 = tmp8 * tmp9
tmp11 = tmp7 + tmp10
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = tmp0 + tmp15
tl.store(out_ptr0 + x4, tmp16, xmask)
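# Kernels 5 and 6 repeat the two-pass softmax, now over the 16
# per-(batch, input-capsule) rows of routing logits.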
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
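# Like mul_sum_2 above, but with per-batch coupling coefficients:
# s[b, j, :] = sum_i c[b, i, j] * u_predict[b, i, j, :].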
@triton.jit
def triton_poi_fused_mul_sum_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x3 + 64 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x3 + 64 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x3 + 64 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x3 + 64 * x2), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
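# In-place routing update for later iterations: b_batch += sum_d u_predict * v.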
@triton.jit
def triton_poi_fused_add_mul_sum_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp6 = tmp4 * tmp5
tmp7 = tmp3 + tmp6
tmp10 = tmp8 * tmp9
tmp11 = tmp7 + tmp10
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = tmp0 + tmp15
tl.store(in_out_ptr0 + x3, tmp16, xmask)
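# call() below unrolls the n_iterations = 4 routing loop: each iteration reuses
# the softmax, weighted-sum and squash kernels above, recycling intermediate
# buffers via reinterpret_tensor.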
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](primals_2, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_2[grid(64)](buf1, primals_1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_3[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused_add_mul_sum_4[grid(64)](primals_2, primals_1, buf3,
buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0)
del buf3
triton_poi_fused__softmax_5[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_6[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0)
del buf5
triton_poi_fused_mul_sum_7[grid(64)](buf6, primals_1, buf7, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0)
del buf6
triton_poi_fused_mul_3[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf4
del buf4
triton_poi_fused_add_mul_sum_8[grid(64)](buf9, primals_1, buf8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0)
del buf8
triton_poi_fused__softmax_5[grid(64)](buf9, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
triton_poi_fused__softmax_6[grid(64)](buf10, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0)
del buf10
triton_poi_fused_mul_sum_7[grid(64)](buf11, primals_1, buf12, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0)
del buf11
triton_poi_fused_mul_3[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = buf9
del buf9
triton_poi_fused_add_mul_sum_8[grid(64)](buf14, primals_1, buf13,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0)
del buf13
triton_poi_fused__softmax_5[grid(64)](buf14, buf15, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf16 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0)
del buf12
triton_poi_fused__softmax_6[grid(64)](buf15, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0)
del buf15
triton_poi_fused_mul_sum_7[grid(64)](buf16, primals_1, buf17, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf18 = reinterpret_tensor(buf16, (4, 4, 4), (16, 4, 1), 0)
del buf16
triton_poi_fused_mul_3[grid(64)](buf17, buf18, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf17
buf19 = buf14
del buf14
triton_poi_fused_add_mul_sum_8[grid(64)](buf19, primals_1, buf18,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf20 = reinterpret_tensor(buf18, (16, 4), (4, 1), 0)
del buf18
triton_poi_fused__softmax_5[grid(64)](buf19, buf20, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf21 = reinterpret_tensor(buf19, (16, 4), (4, 1), 0)
del buf19
triton_poi_fused__softmax_6[grid(64)](buf20, buf21, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf22 = reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0)
del buf20
triton_poi_fused_mul_sum_7[grid(64)](buf21, primals_1, buf22, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf23 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0)
del buf21
triton_poi_fused_mul_3[grid(64)](buf22, buf23, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf22
return buf23, primals_1, primals_2
def squash(x):
    # Capsule squashing non-linearity: v = (|s|^2 / (1 + |s|^2)) * (s / |s|),
    # applied to each capsule vector along dim=2.
    lengths2 = x.pow(2).sum(dim=2)
    lengths = lengths2.sqrt()
    x = x * (lengths2 / (1 + lengths2) / lengths).view(x.size(0), x.size(1), 1)
    return x
class AgreementRoutingNew(nn.Module):
def __init__(self, input_caps, output_caps, n_iterations):
super(AgreementRoutingNew, self).__init__()
self.n_iterations = n_iterations
self.b = nn.Parameter(torch.zeros((input_caps, output_caps)))
def forward(self, input_0):
primals_2 = self.b
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| shwetasrsh/MNIST-baselines | AgreementRouting | false | 16,455 | ["MIT"] | 61 | aa888e201a1dddda13e7b278cab8f940d57538db | https://github.com/shwetasrsh/MNIST-baselines/tree/aa888e201a1dddda13e7b278cab8f940d57538db |
_FPNUp | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fp/cfp5jrxxyxrvhcpoq5tio3p5tkhj5ugdrpyur3x4v6meatzih7jn.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pi/cpiqq6ydqends5jgnpbakdwghpnovztoymnjrsnfd3ycpt6b7cp7.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pa/cpazsbmct7bq6fofvdc67dz3gxjoouipsqf6hehsui5v7kyfrs25.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, clamp_max_2, clamp_max_3, clamp_min, clamp_min_2, clamp_min_3, convert_element_type, convert_element_type_1, convert_element_type_3, iota, mul, mul_2, mul_3, mul_4, sub, sub_1, sub_2, sub_3, sub_4
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 1.0), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0.0), kwargs = {})
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min, torch.int64), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_2, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_2, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_2, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_2, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4) % 4
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + (4*tmp10) + (16*x2)), None, eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + (4*tmp10) + (16*x2)), None, eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = triton_helpers.minimum(tmp23, tmp2)
tmp25 = tmp20 * tmp24
tmp26 = tmp16 + tmp25
tmp27 = tl.load(in_ptr0 + (tmp15 + (4*tmp6) + (16*x2)), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (tmp18 + (4*tmp6) + (16*x2)), None, eviction_policy='evict_last')
tmp29 = tmp28 - tmp27
tmp30 = tmp29 * tmp24
tmp31 = tmp27 + tmp30
tmp32 = tmp26 - tmp31
tmp33 = tmp6.to(tl.float32)
tmp34 = tmp5 - tmp33
tmp35 = triton_helpers.maximum(tmp34, tmp4)
tmp36 = triton_helpers.minimum(tmp35, tmp2)
tmp37 = tmp32 * tmp36
tmp38 = tmp31 + tmp37
tl.store(in_out_ptr0 + (x4), tmp38, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ek/cekjzomjohgpojh7mixefnswiuuc26qael44t5tnof7mmidhnsga.py
# Topologically Sorted Source Nodes: [skip, add], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# add => add_5
# skip => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %convolution), kwargs = {})
triton_poi_fused_add_convolution_3 = async_compile.triton('triton_poi_fused_add_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (4096*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (x2 + (256*y3)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + (256*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/o6/co6dt6churpozf2rlggjsgkkxvbf2ncrmpy44yybwa2jccbraben.py
# Topologically Sorted Source Nodes: [fused], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# fused => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_5, %primals_5, %primals_6, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = (yindex // 256)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (256*x2) + (4096*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_3, (256, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (256, ), (1, ))
assert_size_stride(primals_5, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_6, (256, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_5, buf1, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_5
buf2 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2.run(buf3, primals_2, 16384, grid=grid(16384), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [skip], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [skip, add], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_3.run(buf5, buf3, primals_4, 64, 256, grid=grid(64, 256), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [fused], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf7 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [fused], Original ATen: [aten.convolution]
triton_poi_fused_convolution_4.run(buf6, primals_6, buf7, 1024, 16, grid=grid(1024, 16), stream=stream0)
del buf6
del primals_6
return (buf7, buf0, primals_3, buf1, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.init
class _FPNUp(nn.Module):
def __init__(self, num_input_features, skip_channel_adjust=True):
super().__init__()
self.conv_channel_adjust = nn.Conv2d(num_input_features, 256,
kernel_size=1)
self.conv_fusion = nn.Conv2d(256, 256, kernel_size=3, padding=1)
    def forward(self, x, skip):
        # Upsample the coarser map x to the skip connection's spatial size,
        # project skip to 256 channels, then fuse the sum with a 3x3 conv.
        upsample = nn.UpsamplingBilinear2d(size=skip.size()[2:])
        x = upsample(x)
        skip = self.conv_channel_adjust(skip)
        fused = self.conv_fusion(x + skip)
        return fused
def get_inputs():
return [torch.rand([4, 256, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_input_features': 4}]
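def example_fpn_up():
    # Illustrative usage sketch (not part of the original file); shapes follow
    # get_inputs above.
    up = _FPNUp(num_input_features=4)
    x = torch.rand(4, 256, 4, 4)   # coarse feature map, already 256 channels
    skip = torch.rand(4, 4, 4, 4)  # lateral feature map to adjust and fuse
    return up(x, skip)  # fused feature map, shape (4, 256, 4, 4)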
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
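# Layout transform: repack the skip input into channels-last strides for the
# 1x1 channel-adjust convolution.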
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
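# Layout transform: repack the 3x3 conv_fusion weight into channels-last strides.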
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
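# Decomposed bilinear upsampling of x: gather the four neighbouring samples and
# blend horizontally then vertically (here the target size equals the source,
# 4x4, so the fractional weights vanish and the generic form reduces to a copy).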
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), None,
eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), None,
eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = triton_helpers.minimum(tmp23, tmp2)
tmp25 = tmp20 * tmp24
tmp26 = tmp16 + tmp25
tmp27 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), None,
eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), None,
eviction_policy='evict_last')
tmp29 = tmp28 - tmp27
tmp30 = tmp29 * tmp24
tmp31 = tmp27 + tmp30
tmp32 = tmp26 - tmp31
tmp33 = tmp6.to(tl.float32)
tmp34 = tmp5 - tmp33
tmp35 = triton_helpers.maximum(tmp34, tmp4)
tmp36 = triton_helpers.minimum(tmp35, tmp2)
tmp37 = tmp32 * tmp36
tmp38 = tmp31 + tmp37
tl.store(in_out_ptr0 + x4, tmp38, None)
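# Epilogue for conv_channel_adjust: add its bias, then add the upsampled x
# (computes x + skip in place, in channels-last layout).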
@triton.jit
def triton_poi_fused_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 4096 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (x2 + 256 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 256 * y3), tmp4, xmask & ymask)
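# Epilogue for conv_fusion: add its bias and repack the result back to
# contiguous NCHW.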
@triton.jit
def triton_poi_fused_convolution_4(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 4096 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask)
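# call() pipeline: repack input and weight layouts (kernels 0-1), bilinearly
# upsample x (kernel 2), run the 1x1 skip conv via extern_kernels.convolution
# and fuse x + skip (kernel 3), then the 3x3 conv with its bias epilogue (kernel 4).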
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_3, (256, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (256,), (1,))
assert_size_stride(primals_5, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_6, (256,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 16)](primals_1, buf0, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_1[grid(65536, 9)](primals_5, buf1, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_5
buf2 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.
float32)
buf3 = buf2
del buf2
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2[grid
(16384)](buf3, primals_2, 16384, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
buf4 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_3[grid(64, 256)](buf5, buf3,
primals_4, 64, 256, XBLOCK=256, YBLOCK=1, num_warps=4, num_stages=1
)
del primals_4
buf6 = extern_kernels.convolution(buf5, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf7 = buf3
del buf3
triton_poi_fused_convolution_4[grid(1024, 16)](buf6, primals_6,
buf7, 1024, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf6
del primals_6
return buf7, buf0, primals_3, buf1, buf5
class _FPNUpNew(nn.Module):
def __init__(self, num_input_features, skip_channel_adjust=True):
super().__init__()
self.conv_channel_adjust = nn.Conv2d(num_input_features, 256,
kernel_size=1)
self.conv_fusion = nn.Conv2d(256, 256, kernel_size=3, padding=1)
def forward(self, input_0, input_1):
primals_3 = self.conv_channel_adjust.weight
primals_4 = self.conv_channel_adjust.bias
primals_5 = self.conv_fusion.weight
primals_6 = self.conv_fusion.bias
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| simonmeister/pytorch-mono-depth | _FPNUp | false | 16,456 | ["MIT"] | 56 | 713c70e2fdae6d9d6e0322febadfedcaee9470d3 | https://github.com/simonmeister/pytorch-mono-depth/tree/713c70e2fdae6d9d6e0322febadfedcaee9470d3 |
TAGConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/z2/cz2jhsdkrp52elfw55sen5tqot7bispboogikhvxocjudi232ud3.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %mm, %mm_1], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (12*x1)), tmp0, xmask)
''', device_str='cuda')
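# The "cat" kernel above is just a strided copy: it scatters a contiguous 4x4
# tile into rows of width 12 (output offset x0 + 12*x1), so torch.cat is
# materialized by writing each operand into an aliased slice of one (4, 12)
# buffer instead of allocating and copying separately.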
# kernel path: runs/run_shard_0/inductor_cache/e3/ce3yy4nrwi44hjujecgr3jax6u7rakuyrqx2xahfyees3v5twajq.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %mm, %mm_1], -1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (12*x1)), tmp0, xmask)
''', device_str='cuda')
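# triton_poi_fused_cat_1 is identical to triton_poi_fused_cat_0; the two differ
# only in which aliased slice of the concat buffer call() passes as out_ptr0
# (element offsets 0 and 4 below).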
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 12), (12, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_1, out=buf0)
buf4 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf1 = reinterpret_tensor(buf4, (4, 4), (12, 1), 8) # alias
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, buf0, out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf4, (4, 4), (12, 1), 0) # alias
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf2, 16, grid=grid(16), stream=stream0)
del primals_1
buf3 = reinterpret_tensor(buf4, (4, 4), (12, 1), 4) # alias
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf0, buf3, 16, grid=grid(16), stream=stream0)
buf5 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf4, reinterpret_tensor(primals_3, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf5)
del primals_3
del primals_4
return (buf5, buf4, )
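# Dataflow of call(): buf0 = adj @ x (hop 1); buf1, an alias of buf4[:, 8:12],
# receives adj @ buf0 (hop 2); buf2 (buf4[:, 0:4]) gets x and buf3 (buf4[:, 4:8])
# gets buf0 via the copy kernels. buf4 is therefore cat([x, adj@x, adj@adj@x], -1),
# and buf5 = addmm(bias, buf4, weight.T), with buf0's storage reused for buf5.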
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class TAGConv(nn.Module):
"""
Description
-----------
TAGCN convolutional layer.
Parameters
----------
in_features : int
Dimension of input features.
out_features : int
Dimension of output features.
k : int, optional
Number of propagation hops; refer to the original paper. Default: ``2``.
activation : func of torch.nn.functional, optional
Activation function. Default: ``None``.
batch_norm : bool, optional
Whether to apply batch normalization. Default: ``False``.
dropout : float, optional
Rate of dropout. Default: ``0.0``.
"""
def __init__(self, in_features, out_features, k=2, activation=None,
batch_norm=False, dropout=0.0):
super(TAGConv, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.linear = nn.Linear(in_features * (k + 1), out_features)
self.batch_norm = batch_norm
if batch_norm:
self.norm_func = nn.BatchNorm1d(out_features, affine=False)
self.activation = activation
if dropout > 0.0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.k = k
self.reset_parameters()
def reset_parameters(self):
"""Reset parameters."""
if self.activation == F.leaky_relu:
gain = nn.init.calculate_gain('leaky_relu')
else:
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.linear.weight, gain=gain)
def forward(self, x, adj):
"""
Parameters
----------
x : torch.Tensor
Tensor of input features.
adj : torch.SparseTensor
Sparse adjacency matrix.
Returns
-------
x : torch.Tensor
Output of layer.
"""
fstack = [x]
for i in range(self.k):
y = torch.spmm(adj, fstack[-1])
fstack.append(y)
x = torch.cat(fstack, dim=-1)
x = self.linear(x)
if self.batch_norm:
x = self.norm_func(x)
if self.activation is not None:
x = self.activation(x)
if self.dropout is not None:
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
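# Minimal usage sketch (hypothetical driver code, not part of the grb source):
# args, kwargs = get_init_inputs()
# layer = TAGConv(*args, **kwargs)
# x, adj = get_inputs()
# out = layer(x, adj)  # shape (4, 4); get_inputs() supplies a dense adj here,
# and the traced graph in this record lowers torch.spmm to extern mm calls.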
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 12 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 12 * x1), tmp0, xmask)
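# As in the AOT listing above, these two kernels perform the same strided copy;
# only the aliased output slice they receive from call() differs.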
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 12), (12, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
buf4 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf1 = reinterpret_tensor(buf4, (4, 4), (12, 1), 8)
extern_kernels.mm(primals_2, buf0, out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf4, (4, 4), (12, 1), 0)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(16)](primals_1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf3 = reinterpret_tensor(buf4, (4, 4), (12, 1), 4)
triton_poi_fused_cat_1[grid(16)](buf0, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = buf0
del buf0
extern_kernels.addmm(primals_4, buf4, reinterpret_tensor(primals_3,
(12, 4), (1, 12), 0), alpha=1, beta=1, out=buf5)
del primals_3
del primals_4
return buf5, buf4
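# call() returns (buf5, buf4): buf5 = addmm(bias, cat([x, adj@x, adj@(adj@x)], -1),
# weight.T) is the layer output, and buf4 is the concatenated feature matrix
# saved for the backward pass.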
class TAGConvNew(nn.Module):
"""
Description
-----------
TAGCN convolutional layer.
Parameters
----------
in_features : int
Dimension of input features.
out_features : int
Dimension of output features.
k : int, optional
Number of propagation hops; refer to the original paper. Default: ``2``.
activation : func of torch.nn.functional, optional
Activation function. Default: ``None``.
batch_norm : bool, optional
Whether to apply batch normalization. Default: ``False``.
dropout : float, optional
Rate of dropout. Default: ``0.0``.
"""
def __init__(self, in_features, out_features, k=2, activation=None,
batch_norm=False, dropout=0.0):
super(TAGConvNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.linear = nn.Linear(in_features * (k + 1), out_features)
self.batch_norm = batch_norm
if batch_norm:
self.norm_func = nn.BatchNorm1d(out_features, affine=False)
self.activation = activation
if dropout > 0.0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.k = k
self.reset_parameters()
def reset_parameters(self):
"""Reset parameters."""
if self.activation == F.leaky_relu:
gain = nn.init.calculate_gain('leaky_relu')
else:
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.linear.weight, gain=gain)
def forward(self, input_0, input_1):
primals_3 = self.linear.weight
primals_4 = self.linear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
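# Hypothetical smoke test for the compiled wrapper (requires CUDA, since call()
# allocates its buffers on 'cuda:0'):
# model = TAGConvNew(in_features=4, out_features=4).cuda()
# y = model(torch.rand(4, 4, device='cuda'), torch.rand(4, 4, device='cuda'))
# Note this reproduces TAGConv.forward for the default configuration only: no
# batch norm, activation, or dropout appears in the traced graph.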
| sigeisler/grb | TAGConv | false | 16,457 | [
"MIT"
] | 51 | c89e21076dc05d1edb87dfe2eff20c29ba6bd0c1 | https://github.com/sigeisler/grb/tree/c89e21076dc05d1edb87dfe2eff20c29ba6bd0c1 |
ResNetV2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/a2/ca2l7bjxfwrklzvcxfa2hnyzqh3p6neak37vi6fkugdhbu26fbpz.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 768
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (147*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
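# triton_poi_fused_0 through triton_poi_fused_5 are pure layout kernels: they
# repack the 7x7 stem weight (above), the 4x3x64x64 input image (fused_1), and
# the 3x3 weights at 256/512/1024/2048 channels (fused_2..5) from contiguous
# OIHW/NCHW order into channels-last strides for the extern convolution calls.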
# kernel path: runs/run_shard_0/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/co/ccosum7u5lx5fx5hf5opofiygxj2ntiq67yo5gfegevmhtkaru4r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qg/cqg4z653mpzmif22rwtpmv42y4lbkkxhxjqguwoxl3wb6cn5fn7k.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 262144
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ot/cotn5a2cqhwvdw4ugt6b2a4jl2ou2mh37mnmwxgwogdqw4kcufhp.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1048576
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = (yindex // 1024)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (1024*x2) + (9216*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7n/c7npfx4cng24bae4uqu2hpgblpis6j6mmnvhinuzjms74o3kespg.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4194304
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 2048
y1 = (yindex // 2048)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (2048*x2) + (18432*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ck/cck257ksuszi7rylew7fge2srrwr6phqjm3wbowg7merkmnxmshd.py
# Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# sqrt => sqrt
# sub => sub
# var_mean => var_mean
# w => div
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-10), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_6 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_6(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 147
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (147*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 147, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask & xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 147.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-10
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (147*x0)), tmp23, rmask & xmask)
''', device_str='cuda')
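# This reduction implements Weight Standardization for the 7x7 stem conv: each
# of the 256 filters (147 = 3*7*7 weights) is normalized to
# w = (w - mean) / sqrt(var + 1e-10), using a population variance (correction=0),
# before the convolution runs; this is the StdConv pattern of BiT-style ResNetV2.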
# kernel path: runs/run_shard_0/inductor_cache/nt/cntsn7m3r2xjxa4wdf4m34w7p5wrpvazgixessnri35z7hkzioyv.py
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# input_2 => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%convolution, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_7 = async_compile.triton('triton_poi_fused_constant_pad_nd_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1183744
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 8704) % 34
x1 = (xindex // 256) % 34
x3 = (xindex // 295936)
x4 = xindex % 8704
x6 = xindex
tmp0 = (-1) + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-8448) + x4 + (8192*x2) + (262144*x3)), tmp10, other=0.0)
tl.store(out_ptr0 + (x6), tmp11, None)
''', device_str='cuda')
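# Zero-pads the 4x256x32x32 stem activation to 34x34 spatially (pad=1 per side)
# ahead of the 3x3/stride-2 max pool below; tmp10 masks out-of-range taps so the
# border reads 0.0. Layout is channels-last (channel stride 1, row stride 8704).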
# kernel path: runs/run_shard_0/inductor_cache/bf/cbf5dxwqyr3hqcx4qflqp4smyzjh74okbtbp3q4x6lxhqo5bx6kt.py
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_3 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256) % 16
x2 = (xindex // 4096) % 16
x3 = (xindex // 65536)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp3 = tl.load(in_ptr0 + (512 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp5 = tl.load(in_ptr0 + (8704 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp7 = tl.load(in_ptr0 + (8960 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp9 = tl.load(in_ptr0 + (9216 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp11 = tl.load(in_ptr0 + (17408 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp13 = tl.load(in_ptr0 + (17664 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp15 = tl.load(in_ptr0 + (17920 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + (x4), tmp16, None)
tl.store(out_ptr1 + (x4), tmp41, None)
''', device_str='cuda')
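# 3x3/stride-2 max pool over the padded 34x34 map, producing 16x16 outputs.
# The nine loads walk the pooling window in channels-last order; out_ptr0 gets
# the running maximum and out_ptr1 the argmax position (0..8) as int8 indices,
# which the backward pass uses to route gradients.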
# kernel path: runs/run_shard_0/inductor_cache/v6/cv6xitrbrydxmqmrppdwzu4z4evlalp2yvk4c5vppkczm3j3ligg.py
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm => add_1, rsqrt, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
triton_red_fused_native_group_norm_9 = async_compile.triton('triton_red_fused_native_group_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_9(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 8
r3 = (rindex // 8)
tmp0 = tl.load(in_ptr0 + (r2 + (8*x0) + (256*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
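# GroupNorm statistics via a streaming Welford reduction: xnumel = 128 covers
# 4 images x 32 groups, each group spanning 8 of the 256 channels over 16x16
# positions (rnumel = 2048). Stores the group mean, the sum of squared
# deviations (m2), and rsqrt(var + 1e-05) for the pointwise kernel below.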
# kernel path: runs/run_shard_0/inductor_cache/fa/cfasmvqd2wokec24hd5leef4rosb4wbws23u3moszdu6v4nwesae.py
# Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm => add_2, mul_1
# out => relu
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_2,), kwargs = {})
triton_poi_fused_native_group_norm_relu_10 = async_compile.triton('triton_poi_fused_native_group_norm_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 256
x2 = (xindex // 65536)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 8)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 8)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
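# Fused GroupNorm affine + ReLU: (x0 // 8) maps channel x0 to its group, each
# element is normalized with that group's mean and m2/2048 variance, scaled by
# weight (in_ptr3), shifted by bias (in_ptr4), then clamped at zero.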
# kernel path: runs/run_shard_0/inductor_cache/5f/c5fdme65e6sapqxvpch33udzplyqtzt6nq7xyqsairo2b6g7a2ig.py
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_4 => add_7
# Graph fragment:
# %add_7 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_4, %convolution_1), kwargs = {})
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = tl.load(in_out_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
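# In-place residual addition for the first bottleneck (add_7 = convolution_4 +
# convolution_1): the residual branch is summed onto the shortcut branch,
# reusing in_out_ptr0 as both input and output to avoid an extra buffer.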
# kernel path: runs/run_shard_0/inductor_cache/ko/cko54yui74kmwqhv447fx22qwp7jnwemjonlulxgnw5dmue4lv7s.py
# Topologically Sorted Source Nodes: [group_norm_3], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_3 => add_8, rsqrt_3, var_mean_4
# Graph fragment:
# %var_mean_4 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_6, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_10, 1e-05), kwargs = {})
# %rsqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_8,), kwargs = {})
triton_red_fused_native_group_norm_12 = async_compile.triton('triton_red_fused_native_group_norm_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 8192],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_12(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = (rindex // 32)
tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (262144*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 8192.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/v7/cv7gphlgrnxqhb3o6xz2fmlrl3qq5qreuxqsizdjdmjlei2n7flx.py
# Topologically Sorted Source Nodes: [group_norm_3, out_4], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_3 => add_9, mul_7
# out_4 => relu_3
# Graph fragment:
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, %unsqueeze_23), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %unsqueeze_20), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_9,), kwargs = {})
triton_poi_fused_native_group_norm_relu_13 = async_compile.triton('triton_poi_fused_native_group_norm_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = (xindex // 262144)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 8192.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
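# Same GroupNorm + ReLU fusion at the bottleneck's 1024-channel width: groups
# of 32 channels (x0 // 32) and 8192 elements per group.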
# kernel path: runs/run_shard_0/inductor_cache/hx/chxsqk7qaob3peavveg77grejwbdilkkgl4tcdy3fzbebvpstmlp.py
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_5 => add_14
# Graph fragment:
# %add_14 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_7, %add_7), kwargs = {})
triton_poi_fused_add_14 = async_compile.triton('triton_poi_fused_add_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
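# Same in-place add, but with an identity shortcut (add_14 = convolution_7 +
# add_7): the previous unit's output is carried through without a projection.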
# kernel path: runs/run_shard_0/inductor_cache/qx/cqxt7sx7h4rppcdgdvnzpauxfukrumn4y4ym7pao2fb35esvfhmd.py
# Topologically Sorted Source Nodes: [group_norm_13], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_13 => add_31, rsqrt_13, var_mean_14
# Graph fragment:
# %var_mean_14 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_26, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_31 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_30, 1e-05), kwargs = {})
# %rsqrt_13 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_31,), kwargs = {})
triton_red_fused_native_group_norm_15 = async_compile.triton('triton_red_fused_native_group_norm_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)  # running per-group mean (Welford)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)  # running sum of squared deviations
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)  # running element count
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 16
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r2 + (16*x0) + (512*r3) + (131072*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
    tmp5 = 4096.0
    tmp6 = tmp3 / tmp5  # m2 / count = biased variance
    tmp7 = 1e-05
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.rsqrt(tmp8)  # rstd = 1 / sqrt(var + eps)
    tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
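
# The reduction kernel above computes per-group GroupNorm statistics with a
# streaming Welford update: each of the 128 programs owns one (sample, group)
# pair and folds 4096 elements (16 channels x 256 spatial positions, read from
# an inferred channels-last buffer) into a running mean, a running sum of
# squared deviations (m2), and an element count, then stores mean (out_ptr0),
# m2 (out_ptr1), and rstd = rsqrt(m2 / 4096 + 1e-5) (out_ptr2). A hedged
# eager-mode sketch of the same statistics (hypothetical helper; shapes
# inferred from the index arithmetic above):
def _sketch_group_norm_stats(x, num_groups=32, eps=1e-05):
    import torch
    n = x.shape[0]
    g = x.reshape(n, num_groups, -1)  # flatten each group's channels x spatial
    var, mean = torch.var_mean(g, dim=2, correction=0)  # biased, as in the kernel
    m2 = var * g.shape[2]  # sum of squared deviations
    rstd = torch.rsqrt(var + eps)
    return mean, m2, rstd
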
# kernel path: runs/run_shard_0/inductor_cache/56/c56vczdotihmfda6u5ucl2xnug7va7wbnyjzf255dvfislcptvf7.py
# Topologically Sorted Source Nodes: [group_norm_13, relu_13], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_13 => add_32, mul_27
# relu_13 => relu_13
# Graph fragment:
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_27, %unsqueeze_83), kwargs = {})
# %add_32 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_27, %unsqueeze_80), kwargs = {})
# %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_32,), kwargs = {})
triton_poi_fused_native_group_norm_relu_16 = async_compile.triton('triton_poi_fused_native_group_norm_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = (xindex // 131072)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1  # x - mean
    tmp4 = 4096.0
    tmp5 = tmp3 / tmp4  # m2 / count = biased variance
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)  # rstd recomputed from m2 rather than reloaded
    tmp9 = tmp2 * tmp8  # normalize
    tmp11 = tmp9 * tmp10  # scale by per-channel weight
    tmp13 = tmp11 + tmp12  # shift by per-channel bias
    tmp14 = tl.full([1], 0, tl.int32)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)  # fused ReLU
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
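
# The pointwise kernel above applies the GroupNorm transform with the fused
# ReLU: it recomputes rstd from the stored m2 instead of reusing the rstd
# buffer, normalizes, applies the per-channel affine (weight in in_ptr3, bias
# in in_ptr4), and clamps at zero. A hedged eager-mode sketch (hypothetical
# helper; 32 groups and a (4, 512, 16, 16) activation assumed from the
# constants above):
def _sketch_group_norm_relu(x, mean, m2, weight, bias, num_groups=32, eps=1e-05):
    import torch
    n, c, h, w = x.shape
    rstd = torch.rsqrt(m2 / (c // num_groups * h * w) + eps)
    xg = x.reshape(n, num_groups, -1)
    y = (xg - mean[..., None]) * rstd[..., None]
    y = y.reshape(n, c, h, w) * weight[None, :, None, None] + bias[None, :, None, None]
    return torch.relu(y)
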
# kernel path: runs/run_shard_0/inductor_cache/ah/cahyvhivy62lbh4u2msdkpocyupcs745a4q3nzkgxi4l7dimdlb5.py
# Topologically Sorted Source Nodes: [group_norm_14], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_14 => add_33, rsqrt_14, var_mean_15
# Graph fragment:
# %var_mean_15 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_28, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_33 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_32, 1e-05), kwargs = {})
# %rsqrt_14 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_33,), kwargs = {})
triton_per_fused_native_group_norm_17 = async_compile.triton('triton_per_fused_native_group_norm_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_17', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_17(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 16
r3 = (rindex // 16)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (16*x0) + (512*r3) + (32768*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))  # first reduction: sum of values
    tmp6 = tl.full([1], 1024, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7  # mean
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))  # second reduction: m2
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp13, None)
''', device_str='cuda')
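
# Same GroupNorm statistics as triton_red_fused_native_group_norm_15, but for
# a smaller feature map: each (sample, group) pair now covers only 1024
# elements (16 channels x 64 spatial positions, i.e. an inferred channels-last
# (4, 512, 8, 8) activation), so the whole reduction fits in one block and
# Inductor emits a persistent reduction: a single tile is loaded once, summed
# to a mean, then reduced a second time in registers for the squared
# deviations, instead of the streaming Welford loop.
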
# kernel path: runs/run_shard_0/inductor_cache/4p/c4p77icwssrq64e72yfmj4vwfu2dkw3vd26qkd2p6fyjezfd6j74.py
# Topologically Sorted Source Nodes: [group_norm_14, relu_14], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_14 => add_34, mul_29
# relu_14 => relu_14
# Graph fragment:
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_29, %unsqueeze_89), kwargs = {})
# %add_34 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_29, %unsqueeze_86), kwargs = {})
# %relu_14 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_34,), kwargs = {})
triton_poi_fused_native_group_norm_relu_18 = async_compile.triton('triton_poi_fused_native_group_norm_relu_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = (xindex // 32768)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
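
# Pointwise companion of the persistent-reduction kernel above: the same
# normalize/affine/ReLU math as triton_poi_fused_native_group_norm_relu_16,
# with the per-group element count 1024 (16 channels x 8 x 8 spatial) baked in
# as the divisor for m2.
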
# kernel path: runs/run_shard_0/inductor_cache/wd/cwdaii5fkuthctbzoypogvwcznsd5ng7ytvtvzxr32wd5euxn2qa.py
# Topologically Sorted Source Nodes: [input_8], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_8 => add_35
# Graph fragment:
# %add_35 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_17, %convolution_14), kwargs = {})
triton_poi_fused_add_19 = async_compile.triton('triton_poi_fused_add_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_19(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = tl.load(in_out_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
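
# Same in-place residual addition as triton_poi_fused_add_14, here over
# 524,288 elements; only the load order of the two operands differs, which is
# immaterial for addition.
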
# kernel path: runs/run_shard_0/inductor_cache/pg/cpgh4s6y63mcbvrcx22o7452pgyc4t7nl6tvbtkz5hpzshqxzqxn.py
# Topologically Sorted Source Nodes: [group_norm_15], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_15 => add_36, rsqrt_15, var_mean_16
# Graph fragment:
# %var_mean_16 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_30, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_36 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_34, 1e-05), kwargs = {})
# %rsqrt_15 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_36,), kwargs = {})
triton_red_fused_native_group_norm_20 = async_compile.triton('triton_red_fused_native_group_norm_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_20(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 64
r3 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r2 + (64*x0) + (2048*r3) + (131072*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
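
# Streaming Welford statistics again, for an inferred channels-last
# (4, 2048, 8, 8) activation with 64 channels per group: each of the 128
# (sample, group) programs reduces 64 x 64 = 4096 elements.
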
# kernel path: runs/run_shard_0/inductor_cache/dg/cdg5tttxk7465izpys3jcdfkeg2jlo2zu77au5mqsay37xhdxofi.py
# Topologically Sorted Source Nodes: [group_norm_15, out_20], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_15 => add_37, mul_31
# out_20 => relu_15
# Graph fragment:
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_31, %unsqueeze_95), kwargs = {})
# %add_37 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_31, %unsqueeze_92), kwargs = {})
# %relu_15 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_37,), kwargs = {})
triton_poi_fused_native_group_norm_relu_21 = async_compile.triton('triton_poi_fused_native_group_norm_relu_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = (xindex // 131072)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 4096.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
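
# Matching GroupNorm + ReLU for the 2048-channel activation above (64 channels
# per group, 4096 elements per group), same formula as the earlier pointwise
# normalization kernels.
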
# kernel path: runs/run_shard_0/inductor_cache/pb/cpb2jrw7y3rjx64oigpgehcu5j5v3yzdzflecuivlmkbk4v45xlq.py
# Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_9 => add_42
# Graph fragment:
# %add_42 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_20, %add_35), kwargs = {})
triton_poi_fused_add_22 = async_compile.triton('triton_poi_fused_add_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_22', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_22(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/u2/cu2mfguyheid3lhsertsqwgzhmbcdboexa6orv77am6dgv3c56tz.py
# Topologically Sorted Source Nodes: [group_norm_25], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_25 => add_59, rsqrt_25, var_mean_26
# Graph fragment:
# %var_mean_26 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_50, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_59 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_54, 1e-05), kwargs = {})
# %rsqrt_25 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_59,), kwargs = {})
triton_red_fused_native_group_norm_23 = async_compile.triton('triton_red_fused_native_group_norm_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_23(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = (rindex // 32)
tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
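
# Streaming Welford statistics for an inferred channels-last (4, 1024, 8, 8)
# activation: 32 channels per group, so each program reduces 32 x 64 = 2048
# elements (hence the 2048.0 divisor after the loop).
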
# kernel path: runs/run_shard_0/inductor_cache/2i/c2ibn4t36ftepehjhmjy2lhy2k64sos4xxmmc3rqlr4ipxgj7kvu.py
# Topologically Sorted Source Nodes: [group_norm_25, relu_25], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_25 => add_60, mul_51
# relu_25 => relu_25
# Graph fragment:
# %mul_51 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_51, %unsqueeze_155), kwargs = {})
# %add_60 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_51, %unsqueeze_152), kwargs = {})
# %relu_25 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_60,), kwargs = {})
triton_poi_fused_native_group_norm_relu_24 = async_compile.triton('triton_poi_fused_native_group_norm_relu_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_24(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = (xindex // 65536)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
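
# Matching GroupNorm + ReLU for the (4, 1024, 8, 8) activation: 32 channels
# per group, 2048 elements per group.
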
# kernel path: runs/run_shard_0/inductor_cache/ts/ctsijwy2nkmeeicjj3guvwcwmca4hjmvf76h5bv5ih53bxzwmxia.py
# Topologically Sorted Source Nodes: [group_norm_26], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_26 => add_61, rsqrt_26, var_mean_27
# Graph fragment:
# %var_mean_27 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_52, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_61 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_56, 1e-05), kwargs = {})
# %rsqrt_26 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_61,), kwargs = {})
triton_per_fused_native_group_norm_25 = async_compile.triton('triton_per_fused_native_group_norm_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 512],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_25', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_25(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 512
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 32
r3 = (rindex // 32)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (16384*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp13, None)
''', device_str='cuda')
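
# Persistent-reduction statistics for an inferred channels-last (4, 1024, 4, 4)
# activation: each (sample, group) pair covers only 32 x 16 = 512 elements, so
# the two-pass in-register scheme is used again.
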
# kernel path: runs/run_shard_0/inductor_cache/s4/cs4p5dn7xucm63y5ztk2foxz2etqrvzlzgrzfczfalaql4eiulzx.py
# Topologically Sorted Source Nodes: [group_norm_26, relu_26], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_26 => add_62, mul_53
# relu_26 => relu_26
# Graph fragment:
# %mul_53 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_53, %unsqueeze_161), kwargs = {})
# %add_62 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_53, %unsqueeze_158), kwargs = {})
# %relu_26 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_62,), kwargs = {})
triton_poi_fused_native_group_norm_relu_26 = async_compile.triton('triton_poi_fused_native_group_norm_relu_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_26(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = (xindex // 16384)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 512.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
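
# GroupNorm + ReLU for the (4, 1024, 4, 4) activation; per-group element count
# 512.
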
# kernel path: runs/run_shard_0/inductor_cache/cs/ccsvazrebzcai6zrturgyw3jbkxvss7iwwocp74sbpz5o47xqnh5.py
# Topologically Sorted Source Nodes: [input_12], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_12 => add_63
# Graph fragment:
# %add_63 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_30, %convolution_27), kwargs = {})
triton_poi_fused_add_27 = async_compile.triton('triton_poi_fused_add_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_27', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_27(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = tl.load(in_out_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
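
# Same in-place residual-add pattern as triton_poi_fused_add_14, here over
# 262,144 elements.
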
# kernel path: runs/run_shard_0/inductor_cache/q3/cq3u5xqyiucbjm2ljjvmh575yiuz54bhlkwarxw3l4jaccbxrb7b.py
# Topologically Sorted Source Nodes: [group_norm_27], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_27 => add_64, rsqrt_27, var_mean_28
# Graph fragment:
# %var_mean_28 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_54, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_64 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_58, 1e-05), kwargs = {})
# %rsqrt_27 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_64,), kwargs = {})
triton_red_fused_native_group_norm_28 = async_compile.triton('triton_red_fused_native_group_norm_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 128
r3 = (rindex // 128)
tmp0 = tl.load(in_ptr0 + (r2 + (128*x0) + (4096*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
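
# Streaming Welford statistics for an inferred channels-last (4, 4096, 4, 4)
# activation: 128 channels per group, 128 x 16 = 2048 elements per program.
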
# kernel path: runs/run_shard_0/inductor_cache/et/cet6cogmkjub3gngbqx53hrtwaggsomi75nwjc4lff3hrlcydjb2.py
# Topologically Sorted Source Nodes: [group_norm_27, out_36], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_27 => add_65, mul_55
# out_36 => relu_27
# Graph fragment:
# %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_55, %unsqueeze_167), kwargs = {})
# %add_65 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_55, %unsqueeze_164), kwargs = {})
# %relu_27 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_65,), kwargs = {})
triton_poi_fused_native_group_norm_relu_29 = async_compile.triton('triton_poi_fused_native_group_norm_relu_29', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = (xindex // 65536)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
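
# Matching GroupNorm + ReLU for the (4, 4096, 4, 4) activation (128 channels
# per group, 2048 elements per group).
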
# kernel path: runs/run_shard_0/inductor_cache/mi/cmiq5wwxjkdlvsolp5gi4emkkr36p4chwtezhbmw2yawiz6vzl6s.py
# Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_13 => add_70
# Graph fragment:
# %add_70 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_33, %add_63), kwargs = {})
triton_poi_fused_add_30 = async_compile.triton('triton_poi_fused_add_30', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_30', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_30(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
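
# In-place residual addition over 262,144 elements, structurally identical to
# triton_poi_fused_add_22 above.
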
# kernel path: runs/run_shard_0/inductor_cache/hl/chlapuas22bbqfgfcakfby65m6nqcszwr7h7zb4db6prtecb7rgx.py
# Topologically Sorted Source Nodes: [group_norm_37], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_37 => add_87, rsqrt_37, var_mean_38
# Graph fragment:
# %var_mean_38 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_74, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_87 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_78, 1e-05), kwargs = {})
# %rsqrt_37 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_87,), kwargs = {})
triton_per_fused_native_group_norm_31 = async_compile.triton('triton_per_fused_native_group_norm_31', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_31', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_31(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 64
r3 = (rindex // 64)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (64*x0) + (2048*r3) + (32768*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp13, None)
''', device_str='cuda')
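
# Persistent-reduction statistics for an inferred channels-last (4, 2048, 4, 4)
# activation: 64 channels per group, 64 x 16 = 1024 elements per program.
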
# kernel path: runs/run_shard_0/inductor_cache/4w/c4wcwkbz4iluaatvhbtcyyvy4lghw5dhm67346wjsavgjxqqyisn.py
# Topologically Sorted Source Nodes: [group_norm_37, relu_37], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_37 => add_88, mul_75
# relu_37 => relu_37
# Graph fragment:
# %mul_75 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_75, %unsqueeze_227), kwargs = {})
# %add_88 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_75, %unsqueeze_224), kwargs = {})
# %relu_37 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_88,), kwargs = {})
triton_poi_fused_native_group_norm_relu_32 = async_compile.triton('triton_poi_fused_native_group_norm_relu_32', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_32', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_32(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = (xindex // 32768)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
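
# GroupNorm + ReLU for the (4, 2048, 4, 4) activation; per-group element count
# 1024.
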
# kernel path: runs/run_shard_0/inductor_cache/ef/cefla5ncukzquxkvhoz24sn6jxxmtsm4con6ztnl73s5gbqg5c36.py
# Topologically Sorted Source Nodes: [group_norm_38], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_38 => add_89, rsqrt_38, var_mean_39
# Graph fragment:
# %var_mean_39 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_76, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_89 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_80, 1e-05), kwargs = {})
# %rsqrt_38 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_89,), kwargs = {})
triton_per_fused_native_group_norm_33 = async_compile.triton('triton_per_fused_native_group_norm_33', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_33', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_33(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 64
r3 = (rindex // 64)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (64*x0) + (2048*r3) + (8192*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp13, None)
''', device_str='cuda')
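# Reference sketch (hypothetical, for reading the kernel above): the persistent
# reduction emits three per-(batch, group) buffers -- the mean (out_ptr0), the
# raw sum of squared deviations (out_ptr1, left undivided so the pointwise
# kernels can apply their own element count), and rsqrt(var + eps) (out_ptr2).
# An eager-mode equivalent over a contiguous (N, C, H, W) tensor:
def _reference_group_norm_stats(x, num_groups=32, eps=1e-05):
    import torch
    g = x.reshape(x.shape[0], num_groups, -1)   # flatten each group's channels and spatial dims
    mean = g.mean(dim=2)                        # per-(batch, group) mean -> out_ptr0
    sq_dev_sum = ((g - mean.unsqueeze(2)) ** 2).sum(dim=2)  # raw squared-deviation sum -> out_ptr1
    var = sq_dev_sum / g.shape[2]               # biased variance (var_mean with correction=0)
    return mean, sq_dev_sum, torch.rsqrt(var + eps)  # rsqrt matches tmp14..tmp18 -> out_ptr2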
# kernel path: runs/run_shard_0/inductor_cache/2l/c2lztqwkyp4vlp5rzelzkmfsnc2qfyzq43vgxya3h3yybdxldxlr.py
# Topologically Sorted Source Nodes: [group_norm_38, relu_38], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_38 => add_90, mul_77
# relu_38 => relu_38
# Graph fragment:
# %mul_77 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_77, %unsqueeze_233), kwargs = {})
# %add_90 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_77, %unsqueeze_230), kwargs = {})
# %relu_38 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_90,), kwargs = {})
triton_poi_fused_native_group_norm_relu_34 = async_compile.triton('triton_poi_fused_native_group_norm_relu_34', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_34', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_34(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = (xindex // 8192)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
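# Note: triton_poi_fused_native_group_norm_relu_34 repeats the normalization of
# _32 above for the 2x2 feature map (8192 elements per image rather than 32768),
# so the variance divisor drops from 1024.0 to 256.0 (= 64 channels per group *
# 4 spatial positions); the affine + ReLU tail is identical.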
# kernel path: runs/run_shard_0/inductor_cache/at/cathviy462t2m7xrfdix74co7y4jqx4tpjom2gsa6vrepusnn7yl.py
# Topologically Sorted Source Nodes: [input_16], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_16 => add_91
# Graph fragment:
# %add_91 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_43, %convolution_40), kwargs = {})
triton_poi_fused_add_35 = async_compile.triton('triton_poi_fused_add_35', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_35', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_35(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = tl.load(in_out_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
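# Reference sketch (hypothetical helper, not in the generated graph): the add
# kernel above realizes the residual connection for input_16 by accumulating one
# convolution branch into the other's buffer in place (mutated_arg_names lists
# in_out_ptr0). A minimal eager-mode equivalent for same-shaped float tensors:
def _reference_residual_add(branch, shortcut):
    return branch.add_(shortcut)  # in-place elementwise add, as stored to in_out_ptr0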
# kernel path: runs/run_shard_0/inductor_cache/ok/cokbbgdxp4kvsqs7ly6tgurysy2ycpzdknvrwmfkzaeskt55cixj.py
# Topologically Sorted Source Nodes: [group_norm_39], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_39 => add_92, rsqrt_39, var_mean_40
# Graph fragment:
# %var_mean_40 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_78, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_92 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_82, 1e-05), kwargs = {})
# %rsqrt_39 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_92,), kwargs = {})
triton_per_fused_native_group_norm_36 = async_compile.triton('triton_per_fused_native_group_norm_36', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_36', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_36(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 256
r3 = (rindex // 256)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (256*x0) + (8192*r3) + (32768*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/r4/cr4zapi6cfgujbfor3eajpuxzwpgdg4qetlbu4zys3s7etdbkreo.py
# Topologically Sorted Source Nodes: [group_norm_39, out_52], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_39 => add_93, mul_79
# out_52 => relu_39
# Graph fragment:
# %mul_79 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_79, %unsqueeze_239), kwargs = {})
# %add_93 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_79, %unsqueeze_236), kwargs = {})
# %relu_39 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_93,), kwargs = {})
triton_poi_fused_native_group_norm_relu_37 = async_compile.triton('triton_poi_fused_native_group_norm_relu_37', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_37', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_37(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 8192
x2 = (xindex // 32768)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 256)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 256)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7f/c7f5o4vhy6f4tzv2uapg7b7puvccoydkpylbod2pu5letkcrnjfg.py
# Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_17 => add_98
# Graph fragment:
# %add_98 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_46, %add_91), kwargs = {})
triton_poi_fused_add_38 = async_compile.triton('triton_poi_fused_add_38', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_38', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_38(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qk/cqkj5qfedtmbikvtontzig3tfyoit2n72by3qha7pva74opsqycz.py
# Topologically Sorted Source Nodes: [input_20], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# input_20 => add_113, rsqrt_48, var_mean_49
# Graph fragment:
# %var_mean_49 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_96, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_113 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_100, 1e-05), kwargs = {})
# %rsqrt_48 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_113,), kwargs = {})
triton_per_fused_native_group_norm_39 = async_compile.triton('triton_per_fused_native_group_norm_39', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_39', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_39(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 256
r3 = (rindex // 256)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (256*x0) + (8192*r3) + (32768*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
''', device_str='cuda')
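# Note: unlike triton_per_fused_native_group_norm_33 above, this variant folds
# rsqrt(var + eps) back into a reused buffer (mutated_arg_names=['in_out_ptr0'])
# and only emits the mean to out_ptr0; the tl.debug_barrier() before the
# mutating store sequences the in-place write against the reduction.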
# kernel path: runs/run_shard_0/inductor_cache/du/cdun2xs7seqmwjqgyp2nzcoxmjhy6zhqu7ptyozhimwsb6z6fk2b.py
# Topologically Sorted Source Nodes: [input_20, input_21, input_22], Original ATen: [aten.native_group_norm, aten.relu, aten.mean]
# Source node to ATen node mapping:
# input_20 => add_114, mul_97
# input_21 => relu_48
# input_22 => mean
# Graph fragment:
# %mul_97 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_97, %unsqueeze_293), kwargs = {})
# %add_114 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_97, %unsqueeze_290), kwargs = {})
# %relu_48 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add_114,), kwargs = {})
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_48, [-1, -2], True), kwargs = {})
triton_poi_fused_mean_native_group_norm_relu_40 = async_compile.triton('triton_poi_fused_mean_native_group_norm_relu_40', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_native_group_norm_relu_40', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_native_group_norm_relu_40(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8192
x1 = (xindex // 8192)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (32768*x1)), None)
tmp1 = tl.load(in_ptr1 + ((x2 // 256)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((x2 // 256)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (8192 + x0 + (32768*x1)), None)
tmp18 = tl.load(in_ptr0 + (16384 + x0 + (32768*x1)), None)
tmp25 = tl.load(in_ptr0 + (24576 + x0 + (32768*x1)), None)
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = tmp11 - tmp1
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp5
tmp15 = tmp14 + tmp7
tmp16 = triton_helpers.maximum(tmp9, tmp15)
tmp17 = tmp10 + tmp16
tmp19 = tmp18 - tmp1
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp5
tmp22 = tmp21 + tmp7
tmp23 = triton_helpers.maximum(tmp9, tmp22)
tmp24 = tmp17 + tmp23
tmp26 = tmp25 - tmp1
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp5
tmp29 = tmp28 + tmp7
tmp30 = triton_helpers.maximum(tmp9, tmp29)
tmp31 = tmp24 + tmp30
tmp32 = 4.0
tmp33 = tmp31 / tmp32
tl.store(out_ptr0 + (x2), tmp33, None)
''', device_str='cuda')
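# Reference sketch (hypothetical, for readability): the kernel above fuses the
# final group norm, ReLU, and the global average pool over the 2x2 spatial map
# -- hence the four explicit loads at offsets 0/8192/16384/24576 and the divide
# by 4.0. Here in_ptr2 is already rsqrt(var + eps) from _39, so the kernel
# multiplies directly; the eager-mode sketch below recomputes the statistics:
def _reference_gn_relu_gap(x, weight, bias, num_groups=32, eps=1e-05):
    import torch.nn.functional as F
    y = F.relu(F.group_norm(x, num_groups, weight=weight, bias=bias, eps=eps))
    return y.mean(dim=(-1, -2), keepdim=True)  # matches aten.mean.dim([-1, -2], True)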
# kernel path: runs/run_shard_0/inductor_cache/jc/cjcnicxv5h7prutathudeetq66kicy3jgfixcauhyzxf4kbpduoz.py
# Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# input_23 => convolution_53
# Graph fragment:
# %convolution_53 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_153, %primals_154, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_41 = async_compile.triton('triton_poi_fused_convolution_41', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_41', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_41(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 87372
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 21843
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
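# Reference sketch (hypothetical): triton_poi_fused_convolution_41 is the
# bias-add epilogue of the final 1x1 convolution head; xnumel = 87372 = 4
# batches * 21843 logits, and x0 picks the per-class bias. Eager equivalent for
# a (N, 21843, 1, 1) conv output:
def _reference_head_bias_add(conv_out, bias):
    # broadcast the per-class bias over the batch and 1x1 spatial dims
    return conv_out + bias.view(1, -1, 1, 1)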
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154 = args
args.clear()
assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (256, ), (1, ))
assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_6, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (256, ), (1, ))
assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_10, (256, ), (1, ))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_13, (1024, ), (1, ))
assert_size_stride(primals_14, (1024, ), (1, ))
assert_size_stride(primals_15, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_16, (256, ), (1, ))
assert_size_stride(primals_17, (256, ), (1, ))
assert_size_stride(primals_18, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (256, ), (1, ))
assert_size_stride(primals_20, (256, ), (1, ))
assert_size_stride(primals_21, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_22, (1024, ), (1, ))
assert_size_stride(primals_23, (1024, ), (1, ))
assert_size_stride(primals_24, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_25, (256, ), (1, ))
assert_size_stride(primals_26, (256, ), (1, ))
assert_size_stride(primals_27, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_28, (256, ), (1, ))
assert_size_stride(primals_29, (256, ), (1, ))
assert_size_stride(primals_30, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_31, (1024, ), (1, ))
assert_size_stride(primals_32, (1024, ), (1, ))
assert_size_stride(primals_33, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_34, (256, ), (1, ))
assert_size_stride(primals_35, (256, ), (1, ))
assert_size_stride(primals_36, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_37, (256, ), (1, ))
assert_size_stride(primals_38, (256, ), (1, ))
assert_size_stride(primals_39, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_40, (1024, ), (1, ))
assert_size_stride(primals_41, (1024, ), (1, ))
assert_size_stride(primals_42, (2048, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_43, (512, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_44, (512, ), (1, ))
assert_size_stride(primals_45, (512, ), (1, ))
assert_size_stride(primals_46, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_47, (512, ), (1, ))
assert_size_stride(primals_48, (512, ), (1, ))
assert_size_stride(primals_49, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_50, (2048, ), (1, ))
assert_size_stride(primals_51, (2048, ), (1, ))
assert_size_stride(primals_52, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_53, (512, ), (1, ))
assert_size_stride(primals_54, (512, ), (1, ))
assert_size_stride(primals_55, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_56, (512, ), (1, ))
assert_size_stride(primals_57, (512, ), (1, ))
assert_size_stride(primals_58, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_59, (2048, ), (1, ))
assert_size_stride(primals_60, (2048, ), (1, ))
assert_size_stride(primals_61, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_62, (512, ), (1, ))
assert_size_stride(primals_63, (512, ), (1, ))
assert_size_stride(primals_64, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_65, (512, ), (1, ))
assert_size_stride(primals_66, (512, ), (1, ))
assert_size_stride(primals_67, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_68, (2048, ), (1, ))
assert_size_stride(primals_69, (2048, ), (1, ))
assert_size_stride(primals_70, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_71, (512, ), (1, ))
assert_size_stride(primals_72, (512, ), (1, ))
assert_size_stride(primals_73, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_74, (512, ), (1, ))
assert_size_stride(primals_75, (512, ), (1, ))
assert_size_stride(primals_76, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_77, (2048, ), (1, ))
assert_size_stride(primals_78, (2048, ), (1, ))
assert_size_stride(primals_79, (4096, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_80, (1024, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_81, (1024, ), (1, ))
assert_size_stride(primals_82, (1024, ), (1, ))
assert_size_stride(primals_83, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_84, (1024, ), (1, ))
assert_size_stride(primals_85, (1024, ), (1, ))
assert_size_stride(primals_86, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_87, (4096, ), (1, ))
assert_size_stride(primals_88, (4096, ), (1, ))
assert_size_stride(primals_89, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_90, (1024, ), (1, ))
assert_size_stride(primals_91, (1024, ), (1, ))
assert_size_stride(primals_92, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_93, (1024, ), (1, ))
assert_size_stride(primals_94, (1024, ), (1, ))
assert_size_stride(primals_95, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_96, (4096, ), (1, ))
assert_size_stride(primals_97, (4096, ), (1, ))
assert_size_stride(primals_98, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_99, (1024, ), (1, ))
assert_size_stride(primals_100, (1024, ), (1, ))
assert_size_stride(primals_101, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_102, (1024, ), (1, ))
assert_size_stride(primals_103, (1024, ), (1, ))
assert_size_stride(primals_104, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_105, (4096, ), (1, ))
assert_size_stride(primals_106, (4096, ), (1, ))
assert_size_stride(primals_107, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_108, (1024, ), (1, ))
assert_size_stride(primals_109, (1024, ), (1, ))
assert_size_stride(primals_110, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_111, (1024, ), (1, ))
assert_size_stride(primals_112, (1024, ), (1, ))
assert_size_stride(primals_113, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_114, (4096, ), (1, ))
assert_size_stride(primals_115, (4096, ), (1, ))
assert_size_stride(primals_116, (8192, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_117, (2048, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_118, (2048, ), (1, ))
assert_size_stride(primals_119, (2048, ), (1, ))
assert_size_stride(primals_120, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_121, (2048, ), (1, ))
assert_size_stride(primals_122, (2048, ), (1, ))
assert_size_stride(primals_123, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_124, (8192, ), (1, ))
assert_size_stride(primals_125, (8192, ), (1, ))
assert_size_stride(primals_126, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_127, (2048, ), (1, ))
assert_size_stride(primals_128, (2048, ), (1, ))
assert_size_stride(primals_129, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_130, (2048, ), (1, ))
assert_size_stride(primals_131, (2048, ), (1, ))
assert_size_stride(primals_132, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_133, (8192, ), (1, ))
assert_size_stride(primals_134, (8192, ), (1, ))
assert_size_stride(primals_135, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_136, (2048, ), (1, ))
assert_size_stride(primals_137, (2048, ), (1, ))
assert_size_stride(primals_138, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_139, (2048, ), (1, ))
assert_size_stride(primals_140, (2048, ), (1, ))
assert_size_stride(primals_141, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_142, (8192, ), (1, ))
assert_size_stride(primals_143, (8192, ), (1, ))
assert_size_stride(primals_144, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_145, (2048, ), (1, ))
assert_size_stride(primals_146, (2048, ), (1, ))
assert_size_stride(primals_147, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_148, (2048, ), (1, ))
assert_size_stride(primals_149, (2048, ), (1, ))
assert_size_stride(primals_150, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_151, (8192, ), (1, ))
assert_size_stride(primals_152, (8192, ), (1, ))
assert_size_stride(primals_153, (21843, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_154, (21843, ), (1, ))
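    # Parameter layout, inferred from the asserts above as a reading aid:
    # primals_1 is the 7x7 stem conv weight, primals_2 the (4, 3, 64, 64) input,
    # and the rest alternate 1x1/3x3 bottleneck conv weights with per-channel
    # group-norm scale/shift pairs across four stages whose expanded widths grow
    # from 1024 to 8192 channels, ending in the 21843-way 1x1 classifier head
    # (primals_153 weight, primals_154 bias).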
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 768, 49, grid=grid(768, 49), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_2, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_9, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_9
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_18, buf3, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_18
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_27, buf4, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_27
buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_36, buf5, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_36
buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_46, buf6, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_46
buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_55, buf7, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_55
buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_64, buf8, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_64
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_73, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_73
buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_83, buf10, 1048576, 9, grid=grid(1048576, 9), stream=stream0)
del primals_83
buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_92, buf11, 1048576, 9, grid=grid(1048576, 9), stream=stream0)
del primals_92
buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_101, buf12, 1048576, 9, grid=grid(1048576, 9), stream=stream0)
del primals_101
buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_110, buf13, 1048576, 9, grid=grid(1048576, 9), stream=stream0)
del primals_110
buf14 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_120, buf14, 4194304, 9, grid=grid(4194304, 9), stream=stream0)
del primals_120
buf15 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_129, buf15, 4194304, 9, grid=grid(4194304, 9), stream=stream0)
del primals_129
buf16 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_138, buf16, 4194304, 9, grid=grid(4194304, 9), stream=stream0)
del primals_138
buf17 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_147, buf17, 4194304, 9, grid=grid(4194304, 9), stream=stream0)
del primals_147
buf19 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf21 = reinterpret_tensor(buf19, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf19 # reuse
buf22 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.float32)
# Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_6.run(buf21, buf0, buf22, 256, 147, grid=grid(256), stream=stream0)
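        # Weight standardization: the fused kernel above normalizes each of the
        # 256 stem filters over its 147 inputs (3*7*7) -- w = (w - mean(w)) /
        # sqrt(var(w) + eps) -- before the convolution below; the exact eps
        # constant lives in triton_per_fused_add_div_sqrt_sub_var_mean_6,
        # defined earlier in this file.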
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf1, buf22, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 256, 32, 32), (262144, 1, 8192, 256))
buf24 = empty_strided_cuda((4, 256, 34, 34), (295936, 1, 8704, 256), torch.float32)
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_7.run(buf23, buf24, 1183744, grid=grid(1183744), stream=stream0)
buf25 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
buf26 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.int8)
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf24, buf25, buf26, 262144, grid=grid(262144), stream=stream0)
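        # Stem recap: buf23 is the stride-2 7x7 conv output (4, 256, 32, 32),
        # buf24 zero-pads it to 34x34, and the kernel above takes a 3x3/stride-2
        # max pool down to 16x16, also emitting the compact int8 window-index
        # tensor (buf26) that aten.max_pool2d_with_indices produces.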
buf27 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf28 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf30 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf25, buf27, buf28, buf30, 128, 2048, grid=grid(128), stream=stream0)
buf31 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf25, buf27, buf28, primals_3, primals_4, buf31, 262144, grid=grid(262144), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [residual], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf31, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf33 = extern_kernels.convolution(buf31, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf34 = buf28; del buf28 # reuse
buf35 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf37 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_1], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf33, buf34, buf35, buf37, 128, 2048, grid=grid(128), stream=stream0)
buf38 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_1, relu_1], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf33, buf34, buf35, primals_7, primals_8, buf38, 262144, grid=grid(262144), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf38, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf40 = buf35; del buf35 # reuse
buf41 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf43 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_2], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf39, buf40, buf41, buf43, 128, 2048, grid=grid(128), stream=stream0)
buf44 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_2, relu_2], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf39, buf40, buf41, primals_10, primals_11, buf44, 262144, grid=grid(262144), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf45 = extern_kernels.convolution(buf44, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf46 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf46, buf45, 1048576, grid=grid(1048576), stream=stream0)
buf47 = buf41; del buf41 # reuse
buf48 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf50 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_3], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_12.run(buf46, buf47, buf48, buf50, 128, 8192, grid=grid(128), stream=stream0)
buf51 = buf45; del buf45 # reuse
# Topologically Sorted Source Nodes: [group_norm_3, out_4], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_13.run(buf46, buf47, buf48, primals_13, primals_14, buf51, 1048576, grid=grid(1048576), stream=stream0)
del primals_14
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution]
buf52 = extern_kernels.convolution(buf51, primals_15, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf53 = buf48; del buf48 # reuse
buf54 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf56 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_4], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf52, buf53, buf54, buf56, 128, 2048, grid=grid(128), stream=stream0)
buf57 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_4, relu_4], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf52, buf53, buf54, primals_16, primals_17, buf57, 262144, grid=grid(262144), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.convolution]
buf58 = extern_kernels.convolution(buf57, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf59 = buf54; del buf54 # reuse
buf60 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf62 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_5], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf58, buf59, buf60, buf62, 128, 2048, grid=grid(128), stream=stream0)
buf63 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_5, relu_5], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf58, buf59, buf60, primals_19, primals_20, buf63, 262144, grid=grid(262144), stream=stream0)
del primals_20
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.convolution]
buf64 = extern_kernels.convolution(buf63, primals_21, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf65 = buf64; del buf64 # reuse
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.add]
triton_poi_fused_add_14.run(buf65, buf46, 1048576, grid=grid(1048576), stream=stream0)
buf66 = buf60; del buf60 # reuse
buf67 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf69 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_6], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_12.run(buf65, buf66, buf67, buf69, 128, 8192, grid=grid(128), stream=stream0)
buf70 = reinterpret_tensor(buf23, (4, 1024, 16, 16), (262144, 1, 16384, 1024), 0); del buf23 # reuse
# Topologically Sorted Source Nodes: [group_norm_6, out_8], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_13.run(buf65, buf66, buf67, primals_22, primals_23, buf70, 1048576, grid=grid(1048576), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.convolution]
buf71 = extern_kernels.convolution(buf70, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf71, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf72 = buf67; del buf67 # reuse
buf73 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf75 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_7], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf71, buf72, buf73, buf75, 128, 2048, grid=grid(128), stream=stream0)
buf76 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_7, relu_7], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf71, buf72, buf73, primals_25, primals_26, buf76, 262144, grid=grid(262144), stream=stream0)
del primals_26
# Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.convolution]
buf77 = extern_kernels.convolution(buf76, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf77, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf78 = buf73; del buf73 # reuse
buf79 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf81 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_8], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf77, buf78, buf79, buf81, 128, 2048, grid=grid(128), stream=stream0)
buf82 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_8, relu_8], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf77, buf78, buf79, primals_28, primals_29, buf82, 262144, grid=grid(262144), stream=stream0)
del primals_29
# Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.convolution]
buf83 = extern_kernels.convolution(buf82, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf83, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf84 = buf83; del buf83 # reuse
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.add]
triton_poi_fused_add_14.run(buf84, buf65, 1048576, grid=grid(1048576), stream=stream0)
buf85 = buf79; del buf79 # reuse
buf86 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf88 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_9], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_12.run(buf84, buf85, buf86, buf88, 128, 8192, grid=grid(128), stream=stream0)
buf89 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_9, out_12], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_13.run(buf84, buf85, buf86, primals_31, primals_32, buf89, 1048576, grid=grid(1048576), stream=stream0)
del primals_32
# Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.convolution]
buf90 = extern_kernels.convolution(buf89, primals_33, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf90, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf91 = buf86; del buf86 # reuse
buf92 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf94 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_10], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf90, buf91, buf92, buf94, 128, 2048, grid=grid(128), stream=stream0)
buf95 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_10, relu_10], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf90, buf91, buf92, primals_34, primals_35, buf95, 262144, grid=grid(262144), stream=stream0)
del primals_35
# Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.convolution]
buf96 = extern_kernels.convolution(buf95, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf96, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf97 = buf92; del buf92 # reuse
buf98 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf100 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_11], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf96, buf97, buf98, buf100, 128, 2048, grid=grid(128), stream=stream0)
buf101 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_11, relu_11], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf96, buf97, buf98, primals_37, primals_38, buf101, 262144, grid=grid(262144), stream=stream0)
del primals_38
# Topologically Sorted Source Nodes: [out_15], Original ATen: [aten.convolution]
buf102 = extern_kernels.convolution(buf101, primals_39, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf102, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf103 = buf102; del buf102 # reuse
# Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.add]
triton_poi_fused_add_14.run(buf103, buf84, 1048576, grid=grid(1048576), stream=stream0)
buf104 = buf98; del buf98 # reuse
buf105 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf107 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_12], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_12.run(buf103, buf104, buf105, buf107, 128, 8192, grid=grid(128), stream=stream0)
buf108 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_12, out_16], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_13.run(buf103, buf104, buf105, primals_40, primals_41, buf108, 1048576, grid=grid(1048576), stream=stream0)
del primals_41
# Topologically Sorted Source Nodes: [residual_1], Original ATen: [aten.convolution]
buf109 = extern_kernels.convolution(buf108, primals_42, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
# Topologically Sorted Source Nodes: [out_17], Original ATen: [aten.convolution]
buf110 = extern_kernels.convolution(buf108, primals_43, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf110, (4, 512, 16, 16), (131072, 1, 8192, 512))
buf111 = buf105; del buf105 # reuse
buf112 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf114 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_13], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_15.run(buf110, buf111, buf112, buf114, 128, 4096, grid=grid(128), stream=stream0)
buf115 = empty_strided_cuda((4, 512, 16, 16), (131072, 1, 8192, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_13, relu_13], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_16.run(buf110, buf111, buf112, primals_44, primals_45, buf115, 524288, grid=grid(524288), stream=stream0)
del primals_45
# Topologically Sorted Source Nodes: [out_18], Original ATen: [aten.convolution]
buf116 = extern_kernels.convolution(buf115, buf6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf116, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf117 = buf112; del buf112 # reuse
buf118 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf120 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_14], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_17.run(buf116, buf117, buf118, buf120, 128, 1024, grid=grid(128), stream=stream0)
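# Note: two reduction flavors appear in this file -- triton_red_* kernels
# loop over large per-group reductions (2048-8192 elements here), while
# triton_per_* kernels appear to be single-pass persistent reductions used
# once the per-group element count drops to 1024 or below.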
buf121 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_14, relu_14], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_18.run(buf116, buf117, buf118, primals_47, primals_48, buf121, 131072, grid=grid(131072), stream=stream0)
del primals_48
# Topologically Sorted Source Nodes: [out_19], Original ATen: [aten.convolution]
buf122 = extern_kernels.convolution(buf121, primals_49, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf122, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf123 = buf109; del buf109 # reuse
# Topologically Sorted Source Nodes: [input_8], Original ATen: [aten.add]
triton_poi_fused_add_19.run(buf123, buf122, 524288, grid=grid(524288), stream=stream0)
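# Note: here the accumulator roles are flipped relative to input_7 -- the
# projection-shortcut output buf109 (reused as buf123) is updated in place
# by adding the main-branch conv output buf122 into it.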
buf124 = buf118; del buf118 # reuse
buf125 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf127 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_15], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_20.run(buf123, buf124, buf125, buf127, 128, 4096, grid=grid(128), stream=stream0)
buf128 = buf122; del buf122 # reuse
# Topologically Sorted Source Nodes: [group_norm_15, out_20], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_21.run(buf123, buf124, buf125, primals_50, primals_51, buf128, 524288, grid=grid(524288), stream=stream0)
del primals_51
# Topologically Sorted Source Nodes: [out_21], Original ATen: [aten.convolution]
buf129 = extern_kernels.convolution(buf128, primals_52, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf129, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf130 = buf125; del buf125 # reuse
buf131 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf133 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_16], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_17.run(buf129, buf130, buf131, buf133, 128, 1024, grid=grid(128), stream=stream0)
buf134 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_16, relu_16], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_18.run(buf129, buf130, buf131, primals_53, primals_54, buf134, 131072, grid=grid(131072), stream=stream0)
del primals_54
# Topologically Sorted Source Nodes: [out_22], Original ATen: [aten.convolution]
buf135 = extern_kernels.convolution(buf134, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf135, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf136 = buf131; del buf131 # reuse
buf137 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf139 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_17], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_17.run(buf135, buf136, buf137, buf139, 128, 1024, grid=grid(128), stream=stream0)
buf140 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_17, relu_17], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_18.run(buf135, buf136, buf137, primals_56, primals_57, buf140, 131072, grid=grid(131072), stream=stream0)
del primals_57
# Topologically Sorted Source Nodes: [out_23], Original ATen: [aten.convolution]
buf141 = extern_kernels.convolution(buf140, primals_58, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf141, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf142 = buf141; del buf141 # reuse
# Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.add]
triton_poi_fused_add_22.run(buf142, buf123, 524288, grid=grid(524288), stream=stream0)
buf143 = buf137; del buf137 # reuse
buf144 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf146 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_18], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_20.run(buf142, buf143, buf144, buf146, 128, 4096, grid=grid(128), stream=stream0)
buf147 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_18, out_24], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_21.run(buf142, buf143, buf144, primals_59, primals_60, buf147, 524288, grid=grid(524288), stream=stream0)
del primals_60
# Topologically Sorted Source Nodes: [out_25], Original ATen: [aten.convolution]
buf148 = extern_kernels.convolution(buf147, primals_61, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf148, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf149 = buf144; del buf144 # reuse
buf150 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf152 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_19], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_17.run(buf148, buf149, buf150, buf152, 128, 1024, grid=grid(128), stream=stream0)
buf153 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_19, relu_19], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_18.run(buf148, buf149, buf150, primals_62, primals_63, buf153, 131072, grid=grid(131072), stream=stream0)
del primals_63
# Topologically Sorted Source Nodes: [out_26], Original ATen: [aten.convolution]
buf154 = extern_kernels.convolution(buf153, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf154, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf155 = buf150; del buf150 # reuse
buf156 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf158 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_20], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_17.run(buf154, buf155, buf156, buf158, 128, 1024, grid=grid(128), stream=stream0)
buf159 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_20, relu_20], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_18.run(buf154, buf155, buf156, primals_65, primals_66, buf159, 131072, grid=grid(131072), stream=stream0)
del primals_66
# Topologically Sorted Source Nodes: [out_27], Original ATen: [aten.convolution]
buf160 = extern_kernels.convolution(buf159, primals_67, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf160, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf161 = buf160; del buf160 # reuse
# Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.add]
triton_poi_fused_add_22.run(buf161, buf142, 524288, grid=grid(524288), stream=stream0)
buf162 = buf156; del buf156 # reuse
buf163 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf165 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_21], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_20.run(buf161, buf162, buf163, buf165, 128, 4096, grid=grid(128), stream=stream0)
buf166 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_21, out_28], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_21.run(buf161, buf162, buf163, primals_68, primals_69, buf166, 524288, grid=grid(524288), stream=stream0)
del primals_69
# Topologically Sorted Source Nodes: [out_29], Original ATen: [aten.convolution]
buf167 = extern_kernels.convolution(buf166, primals_70, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf167, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf168 = buf163; del buf163 # reuse
buf169 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf171 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_22], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_17.run(buf167, buf168, buf169, buf171, 128, 1024, grid=grid(128), stream=stream0)
buf172 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_22, relu_22], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_18.run(buf167, buf168, buf169, primals_71, primals_72, buf172, 131072, grid=grid(131072), stream=stream0)
del primals_72
# Topologically Sorted Source Nodes: [out_30], Original ATen: [aten.convolution]
buf173 = extern_kernels.convolution(buf172, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf173, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf174 = buf169; del buf169 # reuse
buf175 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf177 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_23], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_17.run(buf173, buf174, buf175, buf177, 128, 1024, grid=grid(128), stream=stream0)
buf178 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_23, relu_23], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_18.run(buf173, buf174, buf175, primals_74, primals_75, buf178, 131072, grid=grid(131072), stream=stream0)
del primals_75
# Topologically Sorted Source Nodes: [out_31], Original ATen: [aten.convolution]
buf179 = extern_kernels.convolution(buf178, primals_76, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf179, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf180 = buf179; del buf179 # reuse
# Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.add]
triton_poi_fused_add_22.run(buf180, buf161, 524288, grid=grid(524288), stream=stream0)
buf181 = buf175; del buf175 # reuse
buf182 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf184 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_24], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_20.run(buf180, buf181, buf182, buf184, 128, 4096, grid=grid(128), stream=stream0)
buf185 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_24, out_32], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_21.run(buf180, buf181, buf182, primals_77, primals_78, buf185, 524288, grid=grid(524288), stream=stream0)
del primals_78
# Topologically Sorted Source Nodes: [residual_2], Original ATen: [aten.convolution]
buf186 = extern_kernels.convolution(buf185, primals_79, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf186, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
# Topologically Sorted Source Nodes: [out_33], Original ATen: [aten.convolution]
buf187 = extern_kernels.convolution(buf185, primals_80, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf187, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
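# Note: each stage transition doubles the channel width and halves the
# spatial resolution -- here 2048x8x8 activations become 4096x4x4 on the
# shortcut path, and the bottleneck width grows from 512 to 1024.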
buf188 = buf182; del buf182 # reuse
buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf191 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_25], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_23.run(buf187, buf188, buf189, buf191, 128, 2048, grid=grid(128), stream=stream0)
buf192 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_25, relu_25], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_24.run(buf187, buf188, buf189, primals_81, primals_82, buf192, 262144, grid=grid(262144), stream=stream0)
del primals_82
# Topologically Sorted Source Nodes: [out_34], Original ATen: [aten.convolution]
buf193 = extern_kernels.convolution(buf192, buf10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf193, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf194 = buf189; del buf189 # reuse
buf195 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf197 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_26], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf193, buf194, buf195, buf197, 128, 512, grid=grid(128), stream=stream0)
buf198 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_26, relu_26], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf193, buf194, buf195, primals_84, primals_85, buf198, 65536, grid=grid(65536), stream=stream0)
del primals_85
# Topologically Sorted Source Nodes: [out_35], Original ATen: [aten.convolution]
buf199 = extern_kernels.convolution(buf198, primals_86, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf199, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf200 = buf186; del buf186 # reuse
# Topologically Sorted Source Nodes: [input_12], Original ATen: [aten.add]
triton_poi_fused_add_27.run(buf200, buf199, 262144, grid=grid(262144), stream=stream0)
buf201 = buf195; del buf195 # reuse
buf202 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf204 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_27], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_28.run(buf200, buf201, buf202, buf204, 128, 2048, grid=grid(128), stream=stream0)
buf205 = buf199; del buf199 # reuse
# Topologically Sorted Source Nodes: [group_norm_27, out_36], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_29.run(buf200, buf201, buf202, primals_87, primals_88, buf205, 262144, grid=grid(262144), stream=stream0)
del primals_88
# Topologically Sorted Source Nodes: [out_37], Original ATen: [aten.convolution]
buf206 = extern_kernels.convolution(buf205, primals_89, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf206, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf207 = buf202; del buf202 # reuse
buf208 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf210 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_28], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf206, buf207, buf208, buf210, 128, 512, grid=grid(128), stream=stream0)
buf211 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_28, relu_28], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf206, buf207, buf208, primals_90, primals_91, buf211, 65536, grid=grid(65536), stream=stream0)
del primals_91
# Topologically Sorted Source Nodes: [out_38], Original ATen: [aten.convolution]
buf212 = extern_kernels.convolution(buf211, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf212, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf213 = buf208; del buf208 # reuse
buf214 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf216 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_29], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf212, buf213, buf214, buf216, 128, 512, grid=grid(128), stream=stream0)
buf217 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_29, relu_29], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf212, buf213, buf214, primals_93, primals_94, buf217, 65536, grid=grid(65536), stream=stream0)
del primals_94
# Topologically Sorted Source Nodes: [out_39], Original ATen: [aten.convolution]
buf218 = extern_kernels.convolution(buf217, primals_95, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf218, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf219 = buf218; del buf218 # reuse
# Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.add]
triton_poi_fused_add_30.run(buf219, buf200, 262144, grid=grid(262144), stream=stream0)
buf220 = buf214; del buf214 # reuse
buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_30], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_28.run(buf219, buf220, buf221, buf223, 128, 2048, grid=grid(128), stream=stream0)
buf224 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_30, out_40], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_29.run(buf219, buf220, buf221, primals_96, primals_97, buf224, 262144, grid=grid(262144), stream=stream0)
del primals_97
# Topologically Sorted Source Nodes: [out_41], Original ATen: [aten.convolution]
buf225 = extern_kernels.convolution(buf224, primals_98, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf225, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf226 = buf221; del buf221 # reuse
buf227 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf229 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_31], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf225, buf226, buf227, buf229, 128, 512, grid=grid(128), stream=stream0)
buf230 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_31, relu_31], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf225, buf226, buf227, primals_99, primals_100, buf230, 65536, grid=grid(65536), stream=stream0)
del primals_100
# Topologically Sorted Source Nodes: [out_42], Original ATen: [aten.convolution]
buf231 = extern_kernels.convolution(buf230, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf231, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf232 = buf227; del buf227 # reuse
buf233 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf235 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_32], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf231, buf232, buf233, buf235, 128, 512, grid=grid(128), stream=stream0)
buf236 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_32, relu_32], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf231, buf232, buf233, primals_102, primals_103, buf236, 65536, grid=grid(65536), stream=stream0)
del primals_103
# Topologically Sorted Source Nodes: [out_43], Original ATen: [aten.convolution]
buf237 = extern_kernels.convolution(buf236, primals_104, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf237, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf238 = buf237; del buf237 # reuse
# Topologically Sorted Source Nodes: [input_14], Original ATen: [aten.add]
triton_poi_fused_add_30.run(buf238, buf219, 262144, grid=grid(262144), stream=stream0)
buf239 = buf233; del buf233 # reuse
buf240 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf242 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_33], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_28.run(buf238, buf239, buf240, buf242, 128, 2048, grid=grid(128), stream=stream0)
buf243 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_33, out_44], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_29.run(buf238, buf239, buf240, primals_105, primals_106, buf243, 262144, grid=grid(262144), stream=stream0)
del primals_106
# Topologically Sorted Source Nodes: [out_45], Original ATen: [aten.convolution]
buf244 = extern_kernels.convolution(buf243, primals_107, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf244, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf245 = buf240; del buf240 # reuse
buf246 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf248 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_34], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf244, buf245, buf246, buf248, 128, 512, grid=grid(128), stream=stream0)
buf249 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_34, relu_34], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf244, buf245, buf246, primals_108, primals_109, buf249, 65536, grid=grid(65536), stream=stream0)
del primals_109
# Topologically Sorted Source Nodes: [out_46], Original ATen: [aten.convolution]
buf250 = extern_kernels.convolution(buf249, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf250, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf251 = buf246; del buf246 # reuse
buf252 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf254 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_35], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf250, buf251, buf252, buf254, 128, 512, grid=grid(128), stream=stream0)
buf255 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_35, relu_35], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf250, buf251, buf252, primals_111, primals_112, buf255, 65536, grid=grid(65536), stream=stream0)
del primals_112
# Topologically Sorted Source Nodes: [out_47], Original ATen: [aten.convolution]
buf256 = extern_kernels.convolution(buf255, primals_113, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf256, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf257 = buf256; del buf256 # reuse
# Topologically Sorted Source Nodes: [input_15], Original ATen: [aten.add]
triton_poi_fused_add_30.run(buf257, buf238, 262144, grid=grid(262144), stream=stream0)
buf258 = buf252; del buf252 # reuse
buf259 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf261 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_36], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_28.run(buf257, buf258, buf259, buf261, 128, 2048, grid=grid(128), stream=stream0)
buf262 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_36, out_48], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_29.run(buf257, buf258, buf259, primals_114, primals_115, buf262, 262144, grid=grid(262144), stream=stream0)
del primals_115
# Topologically Sorted Source Nodes: [residual_3], Original ATen: [aten.convolution]
buf263 = extern_kernels.convolution(buf262, primals_116, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf263, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
# Topologically Sorted Source Nodes: [out_49], Original ATen: [aten.convolution]
buf264 = extern_kernels.convolution(buf262, primals_117, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf264, (4, 2048, 4, 4), (32768, 1, 8192, 2048))
buf265 = buf259; del buf259 # reuse
buf266 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf268 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_37], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_31.run(buf264, buf265, buf266, buf268, 128, 1024, grid=grid(128), stream=stream0)
buf269 = empty_strided_cuda((4, 2048, 4, 4), (32768, 1, 8192, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_37, relu_37], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_32.run(buf264, buf265, buf266, primals_118, primals_119, buf269, 131072, grid=grid(131072), stream=stream0)
del primals_119
# Topologically Sorted Source Nodes: [out_50], Original ATen: [aten.convolution]
buf270 = extern_kernels.convolution(buf269, buf14, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf270, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf271 = buf266; del buf266 # reuse
buf272 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf274 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_38], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_33.run(buf270, buf271, buf272, buf274, 128, 256, grid=grid(128), stream=stream0)
buf275 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_38, relu_38], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_34.run(buf270, buf271, buf272, primals_121, primals_122, buf275, 32768, grid=grid(32768), stream=stream0)
del primals_122
# Topologically Sorted Source Nodes: [out_51], Original ATen: [aten.convolution]
buf276 = extern_kernels.convolution(buf275, primals_123, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf276, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf277 = buf263; del buf263 # reuse
# Topologically Sorted Source Nodes: [input_16], Original ATen: [aten.add]
triton_poi_fused_add_35.run(buf277, buf276, 131072, grid=grid(131072), stream=stream0)
buf278 = buf272; del buf272 # reuse
buf279 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf281 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_39], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_36.run(buf277, buf278, buf279, buf281, 128, 1024, grid=grid(128), stream=stream0)
buf282 = buf276; del buf276 # reuse
# Topologically Sorted Source Nodes: [group_norm_39, out_52], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_37.run(buf277, buf278, buf279, primals_124, primals_125, buf282, 131072, grid=grid(131072), stream=stream0)
del primals_125
# Topologically Sorted Source Nodes: [out_53], Original ATen: [aten.convolution]
buf283 = extern_kernels.convolution(buf282, primals_126, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf283, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf284 = buf279; del buf279 # reuse
buf285 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf287 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_40], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_33.run(buf283, buf284, buf285, buf287, 128, 256, grid=grid(128), stream=stream0)
buf288 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_40, relu_40], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_34.run(buf283, buf284, buf285, primals_127, primals_128, buf288, 32768, grid=grid(32768), stream=stream0)
del primals_128
# Topologically Sorted Source Nodes: [out_54], Original ATen: [aten.convolution]
buf289 = extern_kernels.convolution(buf288, buf15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf289, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf290 = buf285; del buf285 # reuse
buf291 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf293 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_41], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_33.run(buf289, buf290, buf291, buf293, 128, 256, grid=grid(128), stream=stream0)
buf294 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_41, relu_41], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_34.run(buf289, buf290, buf291, primals_130, primals_131, buf294, 32768, grid=grid(32768), stream=stream0)
del primals_131
# Topologically Sorted Source Nodes: [out_55], Original ATen: [aten.convolution]
buf295 = extern_kernels.convolution(buf294, primals_132, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf295, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf296 = buf295; del buf295 # reuse
# Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.add]
triton_poi_fused_add_38.run(buf296, buf277, 131072, grid=grid(131072), stream=stream0)
buf297 = buf291; del buf291 # reuse
buf298 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_42], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_36.run(buf296, buf297, buf298, buf300, 128, 1024, grid=grid(128), stream=stream0)
buf301 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_42, out_56], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_37.run(buf296, buf297, buf298, primals_133, primals_134, buf301, 131072, grid=grid(131072), stream=stream0)
del primals_134
# Topologically Sorted Source Nodes: [out_57], Original ATen: [aten.convolution]
buf302 = extern_kernels.convolution(buf301, primals_135, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf302, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf303 = buf298; del buf298 # reuse
buf304 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf306 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_43], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_33.run(buf302, buf303, buf304, buf306, 128, 256, grid=grid(128), stream=stream0)
buf307 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_43, relu_43], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_34.run(buf302, buf303, buf304, primals_136, primals_137, buf307, 32768, grid=grid(32768), stream=stream0)
del primals_137
# Topologically Sorted Source Nodes: [out_58], Original ATen: [aten.convolution]
buf308 = extern_kernels.convolution(buf307, buf16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf308, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf309 = buf304; del buf304 # reuse
buf310 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf312 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_44], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_33.run(buf308, buf309, buf310, buf312, 128, 256, grid=grid(128), stream=stream0)
buf313 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_44, relu_44], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_34.run(buf308, buf309, buf310, primals_139, primals_140, buf313, 32768, grid=grid(32768), stream=stream0)
del primals_140
# Topologically Sorted Source Nodes: [out_59], Original ATen: [aten.convolution]
buf314 = extern_kernels.convolution(buf313, primals_141, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf314, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf315 = buf314; del buf314 # reuse
# Topologically Sorted Source Nodes: [input_18], Original ATen: [aten.add]
triton_poi_fused_add_38.run(buf315, buf296, 131072, grid=grid(131072), stream=stream0)
buf316 = buf310; del buf310 # reuse
buf317 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf319 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_45], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_36.run(buf315, buf316, buf317, buf319, 128, 1024, grid=grid(128), stream=stream0)
buf320 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_45, out_60], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_37.run(buf315, buf316, buf317, primals_142, primals_143, buf320, 131072, grid=grid(131072), stream=stream0)
del primals_143
# Topologically Sorted Source Nodes: [out_61], Original ATen: [aten.convolution]
buf321 = extern_kernels.convolution(buf320, primals_144, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf321, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf322 = buf317; del buf317 # reuse
buf323 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf325 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_46], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_33.run(buf321, buf322, buf323, buf325, 128, 256, grid=grid(128), stream=stream0)
buf326 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_46, relu_46], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_34.run(buf321, buf322, buf323, primals_145, primals_146, buf326, 32768, grid=grid(32768), stream=stream0)
del primals_146
# Topologically Sorted Source Nodes: [out_62], Original ATen: [aten.convolution]
buf327 = extern_kernels.convolution(buf326, buf17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf327, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf328 = buf323; del buf323 # reuse
buf329 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf331 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_47], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_33.run(buf327, buf328, buf329, buf331, 128, 256, grid=grid(128), stream=stream0)
buf332 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_47, relu_47], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_34.run(buf327, buf328, buf329, primals_148, primals_149, buf332, 32768, grid=grid(32768), stream=stream0)
del primals_149
# Topologically Sorted Source Nodes: [out_63], Original ATen: [aten.convolution]
buf333 = extern_kernels.convolution(buf332, primals_150, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf333, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf334 = buf333; del buf333 # reuse
# Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.add]
triton_poi_fused_add_38.run(buf334, buf315, 131072, grid=grid(131072), stream=stream0)
buf335 = reinterpret_tensor(buf329, (4, 32, 1, 1), (32, 1, 32, 32), 0); del buf329 # reuse
buf336 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf338 = reinterpret_tensor(buf336, (4, 32, 1, 1), (32, 1, 32, 32), 0); del buf336 # reuse
# Topologically Sorted Source Nodes: [input_20], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_39.run(buf338, buf334, buf335, 128, 1024, grid=grid(128), stream=stream0)
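# Note: for the final norm the (4, 32, 1, 1) stat buffers are re-viewed with
# contiguous strides via reinterpret_tensor, and the kernel computes the
# mean/rstd statistics directly into buf335 and buf338 (buf338 aliases
# buf336, so this launch is in place).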
buf339 = empty_strided_cuda((4, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32)
# Topologically Sorted Source Nodes: [input_20, input_21, input_22], Original ATen: [aten.native_group_norm, aten.relu, aten.mean]
triton_poi_fused_mean_native_group_norm_relu_40.run(buf334, buf335, buf338, primals_151, primals_152, buf339, 32768, grid=grid(32768), stream=stream0)
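# Note: the epilogue fuses three ops in one pointwise kernel -- the group
# norm apply, the ReLU, and the global average pool over the remaining 2x2
# spatial positions -- producing the pooled (4, 8192, 1, 1) features buf339.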
# Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution]
buf340 = extern_kernels.convolution(buf339, primals_153, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf340, (4, 21843, 1, 1), (21843, 1, 21843, 21843))
buf341 = reinterpret_tensor(buf340, (4, 21843, 1, 1), (21843, 1, 87372, 87372), 0); del buf340 # reuse
# Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution]
triton_poi_fused_convolution_41.run(buf341, primals_154, 87372, grid=grid(87372), stream=stream0)
del primals_154
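# Note: the classifier head is a 1x1 convolution over the pooled 1x1
# features, which is equivalent to a fully connected layer; its 21843
# outputs are consistent with the ImageNet-21k label set. The first element
# of the return tuple below is that logits tensor re-viewed as (4, 21843);
# the remaining tensors are intermediates saved for the backward pass.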
return (reinterpret_tensor(buf341, (4, 21843), (21843, 1), 0), buf0, buf1, primals_3, primals_5, primals_6, primals_7, buf2, primals_10, primals_12, primals_13, primals_15, primals_16, buf3, primals_19, primals_21, primals_22, primals_24, primals_25, buf4, primals_28, primals_30, primals_31, primals_33, primals_34, buf5, primals_37, primals_39, primals_40, primals_42, primals_43, primals_44, buf6, primals_47, primals_49, primals_50, primals_52, primals_53, buf7, primals_56, primals_58, primals_59, primals_61, primals_62, buf8, primals_65, primals_67, primals_68, primals_70, primals_71, buf9, primals_74, primals_76, primals_77, primals_79, primals_80, primals_81, buf10, primals_84, primals_86, primals_87, primals_89, primals_90, buf11, primals_93, primals_95, primals_96, primals_98, primals_99, buf12, primals_102, primals_104, primals_105, primals_107, primals_108, buf13, primals_111, primals_113, primals_114, primals_116, primals_117, primals_118, buf14, primals_121, primals_123, primals_124, primals_126, primals_127, buf15, primals_130, primals_132, primals_133, primals_135, primals_136, buf16, primals_139, primals_141, primals_142, primals_144, primals_145, buf17, primals_148, primals_150, primals_151, primals_152, primals_153, buf21, buf22, buf24, buf25, buf26, reinterpret_tensor(buf27, (4, 32), (32, 1), 0), reinterpret_tensor(buf30, (4, 32), (32, 1), 0), buf31, buf33, reinterpret_tensor(buf34, (4, 32), (32, 1), 0), reinterpret_tensor(buf37, (4, 32), (32, 1), 0), buf38, buf39, reinterpret_tensor(buf40, (4, 32), (32, 1), 0), reinterpret_tensor(buf43, (4, 32), (32, 1), 0), buf44, buf46, reinterpret_tensor(buf47, (4, 32), (32, 1), 0), reinterpret_tensor(buf50, (4, 32), (32, 1), 0), buf51, buf52, reinterpret_tensor(buf53, (4, 32), (32, 1), 0), reinterpret_tensor(buf56, (4, 32), (32, 1), 0), buf57, buf58, reinterpret_tensor(buf59, (4, 32), (32, 1), 0), reinterpret_tensor(buf62, (4, 32), (32, 1), 0), buf63, buf65, reinterpret_tensor(buf66, (4, 32), (32, 1), 0), reinterpret_tensor(buf69, (4, 32), (32, 1), 0), buf70, buf71, reinterpret_tensor(buf72, (4, 32), (32, 1), 0), reinterpret_tensor(buf75, (4, 32), (32, 1), 0), buf76, buf77, reinterpret_tensor(buf78, (4, 32), (32, 1), 0), reinterpret_tensor(buf81, (4, 32), (32, 1), 0), buf82, buf84, reinterpret_tensor(buf85, (4, 32), (32, 1), 0), reinterpret_tensor(buf88, (4, 32), (32, 1), 0), buf89, buf90, reinterpret_tensor(buf91, (4, 32), (32, 1), 0), reinterpret_tensor(buf94, (4, 32), (32, 1), 0), buf95, buf96, reinterpret_tensor(buf97, (4, 32), (32, 1), 0), reinterpret_tensor(buf100, (4, 32), (32, 1), 0), buf101, buf103, reinterpret_tensor(buf104, (4, 32), (32, 1), 0), reinterpret_tensor(buf107, (4, 32), (32, 1), 0), buf108, buf110, reinterpret_tensor(buf111, (4, 32), (32, 1), 0), reinterpret_tensor(buf114, (4, 32), (32, 1), 0), buf115, buf116, reinterpret_tensor(buf117, (4, 32), (32, 1), 0), reinterpret_tensor(buf120, (4, 32), (32, 1), 0), buf121, buf123, reinterpret_tensor(buf124, (4, 32), (32, 1), 0), reinterpret_tensor(buf127, (4, 32), (32, 1), 0), buf128, buf129, reinterpret_tensor(buf130, (4, 32), (32, 1), 0), reinterpret_tensor(buf133, (4, 32), (32, 1), 0), buf134, buf135, reinterpret_tensor(buf136, (4, 32), (32, 1), 0), reinterpret_tensor(buf139, (4, 32), (32, 1), 0), buf140, buf142, reinterpret_tensor(buf143, (4, 32), (32, 1), 0), reinterpret_tensor(buf146, (4, 32), (32, 1), 0), buf147, buf148, reinterpret_tensor(buf149, (4, 32), (32, 1), 0), reinterpret_tensor(buf152, (4, 32), (32, 1), 0), buf153, buf154, reinterpret_tensor(buf155, (4, 
32), (32, 1), 0), reinterpret_tensor(buf158, (4, 32), (32, 1), 0), buf159, buf161, reinterpret_tensor(buf162, (4, 32), (32, 1), 0), reinterpret_tensor(buf165, (4, 32), (32, 1), 0), buf166, buf167, reinterpret_tensor(buf168, (4, 32), (32, 1), 0), reinterpret_tensor(buf171, (4, 32), (32, 1), 0), buf172, buf173, reinterpret_tensor(buf174, (4, 32), (32, 1), 0), reinterpret_tensor(buf177, (4, 32), (32, 1), 0), buf178, buf180, reinterpret_tensor(buf181, (4, 32), (32, 1), 0), reinterpret_tensor(buf184, (4, 32), (32, 1), 0), buf185, buf187, reinterpret_tensor(buf188, (4, 32), (32, 1), 0), reinterpret_tensor(buf191, (4, 32), (32, 1), 0), buf192, buf193, reinterpret_tensor(buf194, (4, 32), (32, 1), 0), reinterpret_tensor(buf197, (4, 32), (32, 1), 0), buf198, buf200, reinterpret_tensor(buf201, (4, 32), (32, 1), 0), reinterpret_tensor(buf204, (4, 32), (32, 1), 0), buf205, buf206, reinterpret_tensor(buf207, (4, 32), (32, 1), 0), reinterpret_tensor(buf210, (4, 32), (32, 1), 0), buf211, buf212, reinterpret_tensor(buf213, (4, 32), (32, 1), 0), reinterpret_tensor(buf216, (4, 32), (32, 1), 0), buf217, buf219, reinterpret_tensor(buf220, (4, 32), (32, 1), 0), reinterpret_tensor(buf223, (4, 32), (32, 1), 0), buf224, buf225, reinterpret_tensor(buf226, (4, 32), (32, 1), 0), reinterpret_tensor(buf229, (4, 32), (32, 1), 0), buf230, buf231, reinterpret_tensor(buf232, (4, 32), (32, 1), 0), reinterpret_tensor(buf235, (4, 32), (32, 1), 0), buf236, buf238, reinterpret_tensor(buf239, (4, 32), (32, 1), 0), reinterpret_tensor(buf242, (4, 32), (32, 1), 0), buf243, buf244, reinterpret_tensor(buf245, (4, 32), (32, 1), 0), reinterpret_tensor(buf248, (4, 32), (32, 1), 0), buf249, buf250, reinterpret_tensor(buf251, (4, 32), (32, 1), 0), reinterpret_tensor(buf254, (4, 32), (32, 1), 0), buf255, buf257, reinterpret_tensor(buf258, (4, 32), (32, 1), 0), reinterpret_tensor(buf261, (4, 32), (32, 1), 0), buf262, buf264, reinterpret_tensor(buf265, (4, 32), (32, 1), 0), reinterpret_tensor(buf268, (4, 32), (32, 1), 0), buf269, buf270, reinterpret_tensor(buf271, (4, 32), (32, 1), 0), reinterpret_tensor(buf274, (4, 32), (32, 1), 0), buf275, buf277, reinterpret_tensor(buf278, (4, 32), (32, 1), 0), reinterpret_tensor(buf281, (4, 32), (32, 1), 0), buf282, buf283, reinterpret_tensor(buf284, (4, 32), (32, 1), 0), reinterpret_tensor(buf287, (4, 32), (32, 1), 0), buf288, buf289, reinterpret_tensor(buf290, (4, 32), (32, 1), 0), reinterpret_tensor(buf293, (4, 32), (32, 1), 0), buf294, buf296, reinterpret_tensor(buf297, (4, 32), (32, 1), 0), reinterpret_tensor(buf300, (4, 32), (32, 1), 0), buf301, buf302, reinterpret_tensor(buf303, (4, 32), (32, 1), 0), reinterpret_tensor(buf306, (4, 32), (32, 1), 0), buf307, buf308, reinterpret_tensor(buf309, (4, 32), (32, 1), 0), reinterpret_tensor(buf312, (4, 32), (32, 1), 0), buf313, buf315, reinterpret_tensor(buf316, (4, 32), (32, 1), 0), reinterpret_tensor(buf319, (4, 32), (32, 1), 0), buf320, buf321, reinterpret_tensor(buf322, (4, 32), (32, 1), 0), reinterpret_tensor(buf325, (4, 32), (32, 1), 0), buf326, buf327, reinterpret_tensor(buf328, (4, 32), (32, 1), 0), reinterpret_tensor(buf331, (4, 32), (32, 1), 0), buf332, buf334, buf335, buf338, buf339, )
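# The following is an illustrative, eager-mode sketch (not emitted by
# Inductor) of what each fused group-norm + ReLU kernel pair above computes,
# assuming 32 groups and GroupNorm's default eps of 1e-5; it exists purely
# as documentation of the lowering and is never called by this module.
def _reference_group_norm_relu(x, weight, bias, num_groups=32, eps=1e-5):
    import torch  # local import keeps the sketch self-contained
    n, c, h, w = x.shape
    # Reduction step: per-(sample, group) mean and rstd, matching the
    # triton_red_/triton_per_fused_native_group_norm_* kernels.
    g = x.reshape(n, num_groups, (c // num_groups) * h * w)
    mean = g.mean(dim=2, keepdim=True)
    var = g.var(dim=2, unbiased=False, keepdim=True)
    rstd = torch.rsqrt(var + eps)
    # Pointwise step: normalize, apply the affine parameters, and fuse the
    # ReLU, matching the triton_poi_fused_native_group_norm_relu_* kernels.
    y = ((g - mean) * rstd).reshape(n, c, h, w)
    return torch.relu(y * weight.view(1, c, 1, 1) + bias.view(1, c, 1, 1))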


def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
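    # rand_strided builds tensors with explicit shapes and strides, so the
    # benchmark feeds the compiled graph inputs with exactly the layouts it
    # was specialized to (e.g. contiguous NCHW weights such as primals_1).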
primals_1 = rand_strided((256, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((2048, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((512, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_59 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_60 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_61 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_62 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_63 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_64 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_65 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_66 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_67 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_68 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_69 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_70 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_71 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_72 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_73 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_74 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_75 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_76 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_77 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_78 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_79 = rand_strided((4096, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_80 = rand_strided((1024, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_81 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_82 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_83 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_84 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_85 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_86 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_87 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_88 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_89 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_90 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_91 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_92 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_93 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_94 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_95 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_96 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_97 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_98 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_99 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_100 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_101 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_102 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_103 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_104 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_105 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_106 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_107 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_108 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_109 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_110 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_111 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_112 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_113 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_114 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_115 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_116 = rand_strided((8192, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_117 = rand_strided((2048, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_118 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_119 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_120 = rand_strided((2048, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_121 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_122 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_123 = rand_strided((8192, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_124 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_125 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_126 = rand_strided((2048, 8192, 1, 1), (8192, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_127 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_128 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_129 = rand_strided((2048, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_130 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_131 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_132 = rand_strided((8192, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_133 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_134 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_135 = rand_strided((2048, 8192, 1, 1), (8192, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_136 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_137 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_138 = rand_strided((2048, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_139 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_140 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_141 = rand_strided((8192, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_142 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_143 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_144 = rand_strided((2048, 8192, 1, 1), (8192, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_145 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_146 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_147 = rand_strided((2048, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_148 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_149 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_150 = rand_strided((8192, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_151 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_152 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_153 = rand_strided((21843, 8192, 1, 1), (8192, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_154 = rand_strided((21843, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn
from torch.nn import functional as F
from collections import OrderedDict
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                     bias=False)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
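# Illustrative example (hypothetical array; assumes numpy imported as np):
# a TF kernel stored as HWIO with shape (7, 7, 3, 256) becomes an OIHW
# tensor of shape (256, 3, 7, 7), matching nn.Conv2d's weight layout.
#   w = np.zeros((7, 7, 3, 256), dtype=np.float32)
#   tf2th(w).shape  # torch.Size([256, 3, 7, 7])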
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
        return F.conv2d(x, w, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)
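# StdConv2d standardizes each filter to zero mean / unit variance over its
# (in_channels, kH, kW) extent before convolving. A minimal sketch with
# assumed shapes:
#   conv = StdConv2d(3, 8, kernel_size=3, padding=1, bias=False)
#   y = conv(torch.randn(1, 3, 16, 16))  # y.shape == (1, 8, 16, 16)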
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
Except it puts the stride on 3x3 conv when available.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cin)
self.conv1 = conv1x1(cin, cmid)
self.gn2 = nn.GroupNorm(32, cmid)
self.conv2 = conv3x3(cmid, cmid, stride)
self.gn3 = nn.GroupNorm(32, cmid)
self.conv3 = conv1x1(cmid, cout)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride)
def forward(self, x):
out = self.relu(self.gn1(x))
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(out)
out = self.conv1(out)
out = self.conv2(self.relu(self.gn2(out)))
out = self.conv3(self.relu(self.gn3(out)))
return out + residual
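    # Shape sketch (assumed values): with stride=2 and cin != cout, the
    # residual branch is matched by the 1x1 downsample projection while the
    # main path runs 1x1 -> strided 3x3 -> 1x1, so both branches arrive at
    # (N, cout, H/2, W/2) before the final addition.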
def load_from(self, weights, prefix=''):
convname = 'standardized_conv2d'
with torch.no_grad():
            self.conv1.weight.copy_(tf2th(weights[f'{prefix}a/{convname}/kernel']))
            self.conv2.weight.copy_(tf2th(weights[f'{prefix}b/{convname}/kernel']))
            self.conv3.weight.copy_(tf2th(weights[f'{prefix}c/{convname}/kernel']))
            self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma']))
            self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma']))
            self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma']))
            self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
            self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
            self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
if hasattr(self, 'downsample'):
w = weights[f'{prefix}a/proj/{convname}/kernel']
self.downsample.weight.copy_(tf2th(w))
class ResNetV2(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
    def __init__(self, block_units, width_factor, head_size=21843,
                 zero_head=False):
        super().__init__()
        wf = width_factor
        self.root = nn.Sequential(OrderedDict([
            ('conv', StdConv2d(3, 64 * wf, kernel_size=7, stride=2,
                               padding=3, bias=False)),
            ('pad', nn.ConstantPad2d(1, 0)),
            ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
        ]))
        self.body = nn.Sequential(OrderedDict([
            ('block1', nn.Sequential(OrderedDict(
                [('unit01', PreActBottleneck(cin=64 * wf, cout=256 * wf, cmid=64 * wf))] +
                [(f'unit{i:02d}', PreActBottleneck(cin=256 * wf, cout=256 * wf, cmid=64 * wf))
                 for i in range(2, block_units[0] + 1)]))),
            ('block2', nn.Sequential(OrderedDict(
                [('unit01', PreActBottleneck(cin=256 * wf, cout=512 * wf, cmid=128 * wf, stride=2))] +
                [(f'unit{i:02d}', PreActBottleneck(cin=512 * wf, cout=512 * wf, cmid=128 * wf))
                 for i in range(2, block_units[1] + 1)]))),
            ('block3', nn.Sequential(OrderedDict(
                [('unit01', PreActBottleneck(cin=512 * wf, cout=1024 * wf, cmid=256 * wf, stride=2))] +
                [(f'unit{i:02d}', PreActBottleneck(cin=1024 * wf, cout=1024 * wf, cmid=256 * wf))
                 for i in range(2, block_units[2] + 1)]))),
            ('block4', nn.Sequential(OrderedDict(
                [('unit01', PreActBottleneck(cin=1024 * wf, cout=2048 * wf, cmid=512 * wf, stride=2))] +
                [(f'unit{i:02d}', PreActBottleneck(cin=2048 * wf, cout=2048 * wf, cmid=512 * wf))
                 for i in range(2, block_units[3] + 1)]))),
        ]))
        self.zero_head = zero_head
        self.head = nn.Sequential(OrderedDict([
            ('gn', nn.GroupNorm(32, 2048 * wf)),
            ('relu', nn.ReLU(inplace=True)),
            ('avg', nn.AdaptiveAvgPool2d(output_size=1)),
            ('conv', nn.Conv2d(2048 * wf, head_size, kernel_size=1, bias=True)),
        ]))
def forward(self, x):
x = self.head(self.body(self.root(x)))
assert x.shape[-2:] == (1, 1)
return x[..., 0, 0]
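    # Usage sketch (shapes assumed from get_inputs below): a (4, 3, 64, 64)
    # batch is reduced 64 -> 32 by the stride-2 stem conv, padded to 34 and
    # pooled to 16, then halved by blocks 2-4 down to 2x2; the head's
    # adaptive pooling guarantees the (1, 1) spatial size asserted above,
    # yielding (4, head_size) logits.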
def load_from(self, weights, prefix='resnet/'):
with torch.no_grad():
            self.root.conv.weight.copy_(tf2th(weights[
                f'{prefix}root_block/standardized_conv2d/kernel']))
            self.head.gn.weight.copy_(tf2th(weights[f'{prefix}group_norm/gamma']))
            self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
            if self.zero_head:
                nn.init.zeros_(self.head.conv.weight)
                nn.init.zeros_(self.head.conv.bias)
            else:
                self.head.conv.weight.copy_(tf2th(weights[f'{prefix}head/conv2d/kernel']))
                self.head.conv.bias.copy_(tf2th(weights[f'{prefix}head/conv2d/bias']))
for bname, block in self.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'block_units': [4, 4, 4, 4], 'width_factor': 4}]
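# Minimal end-to-end sketch using the helpers above (weights are randomly
# initialized here; a BiT checkpoint dict for load_from is assumed to be
# obtained separately):
#   init_args, init_kwargs = get_init_inputs()
#   model = ResNetV2(*init_args, **init_kwargs)
#   logits = model(get_inputs()[0])  # shape (4, 21843)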
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn
from torch.nn import functional as F
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 768
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = yindex // 1024
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 1024 * x2 + 9216 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 2048
y1 = yindex // 2048
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 2048 * x2 + 18432 * y1), tmp0, xmask)
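# The next kernel fuses StdConv2d's weight standardization for the 7x7 stem:
# for each of the 256 filters it reduces over 147 = 3*7*7 weight elements to
# get the mean and biased variance, stores sqrt(var + 1e-10), and writes the
# standardized weights (matching torch.var_mean(..., unbiased=False)).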
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_6(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 256
rnumel = 147
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 147 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 147, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask & xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 147.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-10
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 147 * x0), tmp23, rmask & xmask)
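# The next kernel realizes root.pad (nn.ConstantPad2d(1, 0)): each 32x32
# feature map is zero-padded to 34x34 in the channels-last layout produced
# by the stem convolution.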
@triton.jit
def triton_poi_fused_constant_pad_nd_7(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 8704 % 34
x1 = xindex // 256 % 34
x3 = xindex // 295936
x4 = xindex % 8704
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-8448 + x4 + 8192 * x2 + 262144 * x3), tmp10,
other=0.0)
tl.store(out_ptr0 + x6, tmp11, None)
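# The next kernel evaluates root.pool (MaxPool2d, 3x3 window, stride 2) over
# the padded 34x34 maps, producing 16x16 outputs plus the int8 argmax
# offsets that max_pool2d_with_indices records for the backward pass.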
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256 % 16
x2 = xindex // 4096 % 16
x3 = xindex // 65536
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp3 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp5 = tl.load(in_ptr0 + (8704 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp7 = tl.load(in_ptr0 + (8960 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp9 = tl.load(in_ptr0 + (9216 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp11 = tl.load(in_ptr0 + (17408 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp13 = tl.load(in_ptr0 + (17664 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp15 = tl.load(in_ptr0 + (17920 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, None)
tl.store(out_ptr1 + x4, tmp41, None)
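# The next kernel is the reduction half of GroupNorm(32, 256): a Welford
# reduction over each group's 8 channels x 16x16 spatial positions
# (rnumel = 2048) that emits per-group mean, m2, and rsqrt(var + 1e-05).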
@triton.jit
def triton_red_fused_native_group_norm_9(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 8
r3 = rindex // 8
tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
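# The pointwise companion below finishes GroupNorm + ReLU: it reloads the
# per-group mean and m2, rebuilds rstd, applies the affine gamma/beta, and
# clamps at zero.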
@triton.jit
def triton_poi_fused_native_group_norm_relu_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 256
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_out_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_red_fused_native_group_norm_12(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 262144 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 8192.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_13(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 262144
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 8192.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_add_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 16
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 131072 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_16(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = xindex // 131072
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 4096.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_native_group_norm_17(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 16
r3 = rindex // 16
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_18(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = xindex // 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_add_19(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_out_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_red_fused_native_group_norm_20(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 64
r3 = rindex // 64
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 131072 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_21(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 131072
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 4096.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_add_22(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_red_fused_native_group_norm_23(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_24(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_native_group_norm_25(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 32
r3 = rindex // 32
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 16384 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_26(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 16384
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 512.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_add_27(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_out_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 128
r3 = rindex // 128
tmp0 = tl.load(in_ptr0 + (r2 + 128 * x0 + 4096 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_29(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_add_30(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_per_fused_native_group_norm_31(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 64
r3 = rindex // 64
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_32(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_native_group_norm_33(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 64
r3 = rindex // 64
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 8192 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_34(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 8192
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_add_35(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_out_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_per_fused_native_group_norm_36(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 256
r3 = rindex // 256
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 256 * x0 + 8192 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_37(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 8192
x2 = xindex // 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 256), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 256), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_add_38(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_per_fused_native_group_norm_39(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 256
r3 = rindex // 256
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 256 * x0 + 8192 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.debug_barrier()
tl.store(in_out_ptr0 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
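# The next kernel fuses the classifier head's GroupNorm + ReLU with
# AdaptiveAvgPool2d(1): each of the four remaining 2x2 spatial positions is
# normalized and rectified, then the four are averaged (the division by 4.0).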
@triton.jit
def triton_poi_fused_mean_native_group_norm_relu_40(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8192
x1 = xindex // 8192
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 32768 * x1), None)
tmp1 = tl.load(in_ptr1 + x2 // 256, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2 // 256, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (8192 + x0 + 32768 * x1), None)
tmp18 = tl.load(in_ptr0 + (16384 + x0 + 32768 * x1), None)
tmp25 = tl.load(in_ptr0 + (24576 + x0 + 32768 * x1), None)
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = tmp11 - tmp1
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp5
tmp15 = tmp14 + tmp7
tmp16 = triton_helpers.maximum(tmp9, tmp15)
tmp17 = tmp10 + tmp16
tmp19 = tmp18 - tmp1
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp5
tmp22 = tmp21 + tmp7
tmp23 = triton_helpers.maximum(tmp9, tmp22)
tmp24 = tmp17 + tmp23
tmp26 = tmp25 - tmp1
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp5
tmp29 = tmp28 + tmp7
tmp30 = triton_helpers.maximum(tmp9, tmp29)
tmp31 = tmp24 + tmp30
tmp32 = 4.0
tmp33 = tmp31 / tmp32
tl.store(out_ptr0 + x2, tmp33, None)
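# The last kernel adds the classifier bias after the 1x1 head convolution:
# xnumel = 87372 = 4 * 21843, i.e. one bias term per class for each of the
# four pooled feature vectors in the batch.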
@triton.jit
def triton_poi_fused_convolution_41(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 87372
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 21843
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102,
primals_103, primals_104, primals_105, primals_106, primals_107,
primals_108, primals_109, primals_110, primals_111, primals_112,
primals_113, primals_114, primals_115, primals_116, primals_117,
primals_118, primals_119, primals_120, primals_121, primals_122,
primals_123, primals_124, primals_125, primals_126, primals_127,
primals_128, primals_129, primals_130, primals_131, primals_132,
primals_133, primals_134, primals_135, primals_136, primals_137,
primals_138, primals_139, primals_140, primals_141, primals_142,
primals_143, primals_144, primals_145, primals_146, primals_147,
primals_148, primals_149, primals_150, primals_151, primals_152,
primals_153, primals_154) = args
args.clear()
assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256,), (1,))
assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_6, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256,), (1,))
assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_10, (256,), (1,))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_13, (1024,), (1,))
assert_size_stride(primals_14, (1024,), (1,))
assert_size_stride(primals_15, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_16, (256,), (1,))
assert_size_stride(primals_17, (256,), (1,))
assert_size_stride(primals_18, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (256,), (1,))
assert_size_stride(primals_21, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_22, (1024,), (1,))
assert_size_stride(primals_23, (1024,), (1,))
assert_size_stride(primals_24, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_25, (256,), (1,))
assert_size_stride(primals_26, (256,), (1,))
assert_size_stride(primals_27, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_28, (256,), (1,))
assert_size_stride(primals_29, (256,), (1,))
assert_size_stride(primals_30, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_31, (1024,), (1,))
assert_size_stride(primals_32, (1024,), (1,))
assert_size_stride(primals_33, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_34, (256,), (1,))
assert_size_stride(primals_35, (256,), (1,))
assert_size_stride(primals_36, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_37, (256,), (1,))
assert_size_stride(primals_38, (256,), (1,))
assert_size_stride(primals_39, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_40, (1024,), (1,))
assert_size_stride(primals_41, (1024,), (1,))
assert_size_stride(primals_42, (2048, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_43, (512, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_44, (512,), (1,))
assert_size_stride(primals_45, (512,), (1,))
assert_size_stride(primals_46, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_47, (512,), (1,))
assert_size_stride(primals_48, (512,), (1,))
assert_size_stride(primals_49, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_50, (2048,), (1,))
assert_size_stride(primals_51, (2048,), (1,))
assert_size_stride(primals_52, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_53, (512,), (1,))
assert_size_stride(primals_54, (512,), (1,))
assert_size_stride(primals_55, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_56, (512,), (1,))
assert_size_stride(primals_57, (512,), (1,))
assert_size_stride(primals_58, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_59, (2048,), (1,))
assert_size_stride(primals_60, (2048,), (1,))
assert_size_stride(primals_61, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_62, (512,), (1,))
assert_size_stride(primals_63, (512,), (1,))
assert_size_stride(primals_64, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_65, (512,), (1,))
assert_size_stride(primals_66, (512,), (1,))
assert_size_stride(primals_67, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_68, (2048,), (1,))
assert_size_stride(primals_69, (2048,), (1,))
assert_size_stride(primals_70, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_71, (512,), (1,))
assert_size_stride(primals_72, (512,), (1,))
assert_size_stride(primals_73, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_74, (512,), (1,))
assert_size_stride(primals_75, (512,), (1,))
assert_size_stride(primals_76, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_77, (2048,), (1,))
assert_size_stride(primals_78, (2048,), (1,))
assert_size_stride(primals_79, (4096, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_80, (1024, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_81, (1024,), (1,))
assert_size_stride(primals_82, (1024,), (1,))
assert_size_stride(primals_83, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_84, (1024,), (1,))
assert_size_stride(primals_85, (1024,), (1,))
assert_size_stride(primals_86, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_87, (4096,), (1,))
assert_size_stride(primals_88, (4096,), (1,))
assert_size_stride(primals_89, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_90, (1024,), (1,))
assert_size_stride(primals_91, (1024,), (1,))
assert_size_stride(primals_92, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_93, (1024,), (1,))
assert_size_stride(primals_94, (1024,), (1,))
assert_size_stride(primals_95, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_96, (4096,), (1,))
assert_size_stride(primals_97, (4096,), (1,))
assert_size_stride(primals_98, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_99, (1024,), (1,))
assert_size_stride(primals_100, (1024,), (1,))
assert_size_stride(primals_101, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_102, (1024,), (1,))
assert_size_stride(primals_103, (1024,), (1,))
assert_size_stride(primals_104, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_105, (4096,), (1,))
assert_size_stride(primals_106, (4096,), (1,))
assert_size_stride(primals_107, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_108, (1024,), (1,))
assert_size_stride(primals_109, (1024,), (1,))
assert_size_stride(primals_110, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_111, (1024,), (1,))
assert_size_stride(primals_112, (1024,), (1,))
assert_size_stride(primals_113, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_114, (4096,), (1,))
assert_size_stride(primals_115, (4096,), (1,))
assert_size_stride(primals_116, (8192, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_117, (2048, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_118, (2048,), (1,))
assert_size_stride(primals_119, (2048,), (1,))
assert_size_stride(primals_120, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_121, (2048,), (1,))
assert_size_stride(primals_122, (2048,), (1,))
assert_size_stride(primals_123, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_124, (8192,), (1,))
assert_size_stride(primals_125, (8192,), (1,))
assert_size_stride(primals_126, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_127, (2048,), (1,))
assert_size_stride(primals_128, (2048,), (1,))
assert_size_stride(primals_129, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_130, (2048,), (1,))
assert_size_stride(primals_131, (2048,), (1,))
assert_size_stride(primals_132, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_133, (8192,), (1,))
assert_size_stride(primals_134, (8192,), (1,))
assert_size_stride(primals_135, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_136, (2048,), (1,))
assert_size_stride(primals_137, (2048,), (1,))
assert_size_stride(primals_138, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_139, (2048,), (1,))
assert_size_stride(primals_140, (2048,), (1,))
assert_size_stride(primals_141, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_142, (8192,), (1,))
assert_size_stride(primals_143, (8192,), (1,))
assert_size_stride(primals_144, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_145, (2048,), (1,))
assert_size_stride(primals_146, (2048,), (1,))
assert_size_stride(primals_147, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_148, (2048,), (1,))
assert_size_stride(primals_149, (2048,), (1,))
assert_size_stride(primals_150, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_151, (8192,), (1,))
assert_size_stride(primals_152, (8192,), (1,))
assert_size_stride(primals_153, (21843, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_154, (21843,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
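        # The triton_poi_fused_{0..5} copies below repack the convolution
        # weights (and the input image) from contiguous NCHW into
        # channels-last strides, e.g. (147, 1, 21, 3) for the (256, 3, 7, 7)
        # stem filter, so the later fused kernels and extern convolutions
        # read them in channel-minor order.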
buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(768, 49)](primals_1, buf0, 768, 49, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_2, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_9, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_9
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_18, buf3, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_27, buf4, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_27
buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_36, buf5, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_36
buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_46, buf6, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_46
buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_55, buf7, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_55
buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_64, buf8, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_64
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_73, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_73
buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_83, buf10, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_83
buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_92, buf11, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_92
buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_101, buf12, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_101
buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_110, buf13, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_110
buf14 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144,
2048), torch.float32)
triton_poi_fused_5[grid(4194304, 9)](primals_120, buf14, 4194304, 9,
XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1)
del primals_120
buf15 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144,
2048), torch.float32)
triton_poi_fused_5[grid(4194304, 9)](primals_129, buf15, 4194304, 9,
XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1)
del primals_129
buf16 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144,
2048), torch.float32)
triton_poi_fused_5[grid(4194304, 9)](primals_138, buf16, 4194304, 9,
XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1)
del primals_138
buf17 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144,
2048), torch.float32)
triton_poi_fused_5[grid(4194304, 9)](primals_147, buf17, 4194304, 9,
XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1)
del primals_147
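        # Stem: triton_per_fused_add_div_sqrt_sub_var_mean_6 standardizes the
        # 7x7 weights per filter (the StdConv2d pattern defined below:
        # subtract the mean, divide by sqrt(var + eps)), then the stride-2
        # convolution produces the 32x32 stem feature map.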
buf19 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf21 = reinterpret_tensor(buf19, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf19
buf22 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.
float32)
triton_per_fused_add_div_sqrt_sub_var_mean_6[grid(256)](buf21, buf0,
buf22, 256, 147, XBLOCK=1, num_warps=2, num_stages=1)
buf23 = extern_kernels.convolution(buf1, buf22, stride=(2, 2),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 256, 32, 32), (262144, 1, 8192, 256))
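        # Pad the stem output by one pixel on each side (32x32 -> 34x34),
        # then take a 3x3/stride-2 max pool down to 16x16, keeping int8
        # argmax indices (buf26) for the backward pass.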
buf24 = empty_strided_cuda((4, 256, 34, 34), (295936, 1, 8704, 256),
torch.float32)
triton_poi_fused_constant_pad_nd_7[grid(1183744)](buf23, buf24,
1183744, XBLOCK=1024, num_warps=4, num_stages=1)
buf25 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
buf26 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(262144)](buf24,
buf25, buf26, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf27 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf28 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf30 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf25, buf27, buf28,
buf30, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf31 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf25,
buf27, buf28, primals_3, primals_4, buf31, 262144, XBLOCK=1024,
num_warps=4, num_stages=1)
del primals_4
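        # First bottleneck of the 16x16 stage: the fused GroupNorm+ReLU
        # output (buf31) feeds both the 1x1 shortcut projection to 1024
        # channels (primals_5) and the 1x1 bottleneck reduce (primals_6).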
buf32 = extern_kernels.convolution(buf31, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf33 = extern_kernels.convolution(buf31, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf34 = buf28
del buf28
buf35 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf37 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf33, buf34, buf35,
buf37, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf38 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf33,
buf34, buf35, primals_7, primals_8, buf38, 262144, XBLOCK=1024,
num_warps=4, num_stages=1)
del primals_8
buf39 = extern_kernels.convolution(buf38, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf40 = buf35
del buf35
buf41 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf43 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf39, buf40, buf41,
buf43, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf44 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf39,
buf40, buf41, primals_10, primals_11, buf44, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_11
buf45 = extern_kernels.convolution(buf44, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf46 = buf32
del buf32
triton_poi_fused_add_11[grid(1048576)](buf46, buf45, 1048576,
XBLOCK=512, num_warps=8, num_stages=1)
buf47 = buf41
del buf41
buf48 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf50 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf46, buf47,
buf48, buf50, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf51 = buf45
del buf45
triton_poi_fused_native_group_norm_relu_13[grid(1048576)](buf46,
buf47, buf48, primals_13, primals_14, buf51, 1048576, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_14
buf52 = extern_kernels.convolution(buf51, primals_15, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf53 = buf48
del buf48
buf54 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf56 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf52, buf53, buf54,
buf56, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf57 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf52,
buf53, buf54, primals_16, primals_17, buf57, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_17
buf58 = extern_kernels.convolution(buf57, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf59 = buf54
del buf54
buf60 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf62 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf58, buf59, buf60,
buf62, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf63 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf58,
buf59, buf60, primals_19, primals_20, buf63, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_20
buf64 = extern_kernels.convolution(buf63, primals_21, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf65 = buf64
del buf64
triton_poi_fused_add_14[grid(1048576)](buf65, buf46, 1048576,
XBLOCK=512, num_warps=8, num_stages=1)
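        # Identity residual: later blocks in the stage add their 1x1 expand
        # output onto the running 1024-channel feature map in place
        # (triton_poi_fused_add_14).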
buf66 = buf60
del buf60
buf67 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf69 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf65, buf66,
buf67, buf69, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf70 = reinterpret_tensor(buf23, (4, 1024, 16, 16), (262144, 1,
16384, 1024), 0)
del buf23
triton_poi_fused_native_group_norm_relu_13[grid(1048576)](buf65,
buf66, buf67, primals_22, primals_23, buf70, 1048576, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_23
buf71 = extern_kernels.convolution(buf70, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf71, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf72 = buf67
del buf67
buf73 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf75 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf71, buf72, buf73,
buf75, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf76 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf71,
buf72, buf73, primals_25, primals_26, buf76, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_26
buf77 = extern_kernels.convolution(buf76, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf77, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf78 = buf73
del buf73
buf79 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf81 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf77, buf78, buf79,
buf81, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf82 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf77,
buf78, buf79, primals_28, primals_29, buf82, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_29
buf83 = extern_kernels.convolution(buf82, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf83, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf84 = buf83
del buf83
triton_poi_fused_add_14[grid(1048576)](buf84, buf65, 1048576,
XBLOCK=512, num_warps=8, num_stages=1)
buf85 = buf79
del buf79
buf86 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf88 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf84, buf85,
buf86, buf88, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf89 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384,
1024), torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(1048576)](buf84,
buf85, buf86, primals_31, primals_32, buf89, 1048576, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_32
buf90 = extern_kernels.convolution(buf89, primals_33, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf90, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf91 = buf86
del buf86
buf92 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf94 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf90, buf91, buf92,
buf94, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf95 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf90,
buf91, buf92, primals_34, primals_35, buf95, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_35
buf96 = extern_kernels.convolution(buf95, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf96, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf97 = buf92
del buf92
buf98 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf100 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_9[grid(128)](buf96, buf97, buf98,
buf100, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf101 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf96,
buf97, buf98, primals_37, primals_38, buf101, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_38
buf102 = extern_kernels.convolution(buf101, primals_39, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf102, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf103 = buf102
del buf102
triton_poi_fused_add_14[grid(1048576)](buf103, buf84, 1048576,
XBLOCK=512, num_warps=8, num_stages=1)
buf104 = buf98
del buf98
buf105 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf107 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_12[grid(128)](buf103, buf104,
buf105, buf107, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf108 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384,
1024), torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(1048576)](buf103,
buf104, buf105, primals_40, primals_41, buf108, 1048576, XBLOCK
=512, num_warps=8, num_stages=1)
del primals_41
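        # Stage transition to 8x8: primals_42 is the stride-2 1x1 projection
        # to 2048 channels, primals_43 enters the first 512-channel
        # bottleneck, and its 3x3 conv (buf6) carries the stride, matching
        # the PreActBottleneck note below.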
buf109 = extern_kernels.convolution(buf108, primals_42, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf110 = extern_kernels.convolution(buf108, primals_43, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf110, (4, 512, 16, 16), (131072, 1, 8192, 512))
buf111 = buf105
del buf105
buf112 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf114 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_15[grid(128)](buf110, buf111,
buf112, buf114, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf115 = empty_strided_cuda((4, 512, 16, 16), (131072, 1, 8192, 512
), torch.float32)
triton_poi_fused_native_group_norm_relu_16[grid(524288)](buf110,
buf111, buf112, primals_44, primals_45, buf115, 524288, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_45
buf116 = extern_kernels.convolution(buf115, buf6, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf116, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf117 = buf112
del buf112
buf118 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf120 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_17[grid(128)](buf116, buf117,
buf118, buf120, 128, 1024, num_warps=8, num_stages=1)
buf121 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_18[grid(131072)](buf116,
buf117, buf118, primals_47, primals_48, buf121, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_48
buf122 = extern_kernels.convolution(buf121, primals_49, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf122, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf123 = buf109
del buf109
triton_poi_fused_add_19[grid(524288)](buf123, buf122, 524288,
XBLOCK=512, num_warps=8, num_stages=1)
buf124 = buf118
del buf118
buf125 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf127 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_20[grid(128)](buf123, buf124,
buf125, buf127, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf128 = buf122
del buf122
triton_poi_fused_native_group_norm_relu_21[grid(524288)](buf123,
buf124, buf125, primals_50, primals_51, buf128, 524288, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_51
buf129 = extern_kernels.convolution(buf128, primals_52, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf129, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf130 = buf125
del buf125
buf131 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf133 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_17[grid(128)](buf129, buf130,
buf131, buf133, 128, 1024, num_warps=8, num_stages=1)
buf134 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_18[grid(131072)](buf129,
buf130, buf131, primals_53, primals_54, buf134, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_54
buf135 = extern_kernels.convolution(buf134, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf135, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf136 = buf131
del buf131
buf137 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf139 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_17[grid(128)](buf135, buf136,
buf137, buf139, 128, 1024, num_warps=8, num_stages=1)
buf140 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_18[grid(131072)](buf135,
buf136, buf137, primals_56, primals_57, buf140, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_57
buf141 = extern_kernels.convolution(buf140, primals_58, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf141, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf142 = buf141
del buf141
triton_poi_fused_add_22[grid(524288)](buf142, buf123, 524288,
XBLOCK=512, num_warps=8, num_stages=1)
buf143 = buf137
del buf137
buf144 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf146 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_20[grid(128)](buf142, buf143,
buf144, buf146, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf147 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_native_group_norm_relu_21[grid(524288)](buf142,
buf143, buf144, primals_59, primals_60, buf147, 524288, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_60
buf148 = extern_kernels.convolution(buf147, primals_61, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf148, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf149 = buf144
del buf144
buf150 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf152 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_17[grid(128)](buf148, buf149,
buf150, buf152, 128, 1024, num_warps=8, num_stages=1)
buf153 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_18[grid(131072)](buf148,
buf149, buf150, primals_62, primals_63, buf153, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_63
buf154 = extern_kernels.convolution(buf153, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf154, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf155 = buf150
del buf150
buf156 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf158 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_17[grid(128)](buf154, buf155,
buf156, buf158, 128, 1024, num_warps=8, num_stages=1)
buf159 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_18[grid(131072)](buf154,
buf155, buf156, primals_65, primals_66, buf159, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_66
buf160 = extern_kernels.convolution(buf159, primals_67, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf160, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf161 = buf160
del buf160
triton_poi_fused_add_22[grid(524288)](buf161, buf142, 524288,
XBLOCK=512, num_warps=8, num_stages=1)
buf162 = buf156
del buf156
buf163 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf165 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_20[grid(128)](buf161, buf162,
buf163, buf165, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf166 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_native_group_norm_relu_21[grid(524288)](buf161,
buf162, buf163, primals_68, primals_69, buf166, 524288, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_69
buf167 = extern_kernels.convolution(buf166, primals_70, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf167, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf168 = buf163
del buf163
buf169 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf171 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_17[grid(128)](buf167, buf168,
buf169, buf171, 128, 1024, num_warps=8, num_stages=1)
buf172 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_18[grid(131072)](buf167,
buf168, buf169, primals_71, primals_72, buf172, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_72
buf173 = extern_kernels.convolution(buf172, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf173, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf174 = buf169
del buf169
buf175 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf177 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_17[grid(128)](buf173, buf174,
buf175, buf177, 128, 1024, num_warps=8, num_stages=1)
buf178 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_18[grid(131072)](buf173,
buf174, buf175, primals_74, primals_75, buf178, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_75
buf179 = extern_kernels.convolution(buf178, primals_76, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf179, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf180 = buf179
del buf179
triton_poi_fused_add_22[grid(524288)](buf180, buf161, 524288,
XBLOCK=512, num_warps=8, num_stages=1)
buf181 = buf175
del buf175
buf182 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf184 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_20[grid(128)](buf180, buf181,
buf182, buf184, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf185 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_native_group_norm_relu_21[grid(524288)](buf180,
buf181, buf182, primals_77, primals_78, buf185, 524288, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_78
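        # Stage transition to 4x4: same pattern at 4096/1024 channels
        # (stride-2 projection primals_79, bottleneck entry primals_80,
        # strided 3x3 weights in buf10).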
buf186 = extern_kernels.convolution(buf185, primals_79, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf186, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf187 = extern_kernels.convolution(buf185, primals_80, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf187, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
buf188 = buf182
del buf182
buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf191 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_23[grid(128)](buf187, buf188,
buf189, buf191, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf192 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_24[grid(262144)](buf187,
buf188, buf189, primals_81, primals_82, buf192, 262144, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_82
buf193 = extern_kernels.convolution(buf192, buf10, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf193, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf194 = buf189
del buf189
buf195 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf197 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf193, buf194,
buf195, buf197, 128, 512, num_warps=4, num_stages=1)
buf198 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(65536)](buf193,
buf194, buf195, primals_84, primals_85, buf198, 65536, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_85
buf199 = extern_kernels.convolution(buf198, primals_86, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf199, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf200 = buf186
del buf186
triton_poi_fused_add_27[grid(262144)](buf200, buf199, 262144,
XBLOCK=1024, num_warps=4, num_stages=1)
buf201 = buf195
del buf195
buf202 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf204 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf200, buf201,
buf202, buf204, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf205 = buf199
del buf199
triton_poi_fused_native_group_norm_relu_29[grid(262144)](buf200,
buf201, buf202, primals_87, primals_88, buf205, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_88
buf206 = extern_kernels.convolution(buf205, primals_89, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf206, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf207 = buf202
del buf202
buf208 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf210 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf206, buf207,
buf208, buf210, 128, 512, num_warps=4, num_stages=1)
buf211 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(65536)](buf206,
buf207, buf208, primals_90, primals_91, buf211, 65536, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_91
buf212 = extern_kernels.convolution(buf211, buf11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf212, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf213 = buf208
del buf208
buf214 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf216 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf212, buf213,
buf214, buf216, 128, 512, num_warps=4, num_stages=1)
buf217 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(65536)](buf212,
buf213, buf214, primals_93, primals_94, buf217, 65536, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_94
buf218 = extern_kernels.convolution(buf217, primals_95, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf218, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf219 = buf218
del buf218
triton_poi_fused_add_30[grid(262144)](buf219, buf200, 262144,
XBLOCK=1024, num_warps=4, num_stages=1)
buf220 = buf214
del buf214
buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf219, buf220,
buf221, buf223, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf224 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_native_group_norm_relu_29[grid(262144)](buf219,
buf220, buf221, primals_96, primals_97, buf224, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_97
buf225 = extern_kernels.convolution(buf224, primals_98, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf225, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf226 = buf221
del buf221
buf227 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf229 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf225, buf226,
buf227, buf229, 128, 512, num_warps=4, num_stages=1)
buf230 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(65536)](buf225,
buf226, buf227, primals_99, primals_100, buf230, 65536, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_100
buf231 = extern_kernels.convolution(buf230, buf12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf231, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf232 = buf227
del buf227
buf233 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf235 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf231, buf232,
buf233, buf235, 128, 512, num_warps=4, num_stages=1)
buf236 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(65536)](buf231,
buf232, buf233, primals_102, primals_103, buf236, 65536, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_103
buf237 = extern_kernels.convolution(buf236, primals_104, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf237, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf238 = buf237
del buf237
triton_poi_fused_add_30[grid(262144)](buf238, buf219, 262144,
XBLOCK=1024, num_warps=4, num_stages=1)
buf239 = buf233
del buf233
buf240 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf242 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf238, buf239,
buf240, buf242, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf243 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_native_group_norm_relu_29[grid(262144)](buf238,
buf239, buf240, primals_105, primals_106, buf243, 262144,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_106
buf244 = extern_kernels.convolution(buf243, primals_107, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf244, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf245 = buf240
del buf240
buf246 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf248 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf244, buf245,
buf246, buf248, 128, 512, num_warps=4, num_stages=1)
buf249 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(65536)](buf244,
buf245, buf246, primals_108, primals_109, buf249, 65536, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_109
buf250 = extern_kernels.convolution(buf249, buf13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf250, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf251 = buf246
del buf246
buf252 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf254 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf250, buf251,
buf252, buf254, 128, 512, num_warps=4, num_stages=1)
buf255 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(65536)](buf250,
buf251, buf252, primals_111, primals_112, buf255, 65536, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_112
buf256 = extern_kernels.convolution(buf255, primals_113, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf256, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf257 = buf256
del buf256
triton_poi_fused_add_30[grid(262144)](buf257, buf238, 262144,
XBLOCK=1024, num_warps=4, num_stages=1)
buf258 = buf252
del buf252
buf259 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf261 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf257, buf258,
buf259, buf261, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf262 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_native_group_norm_relu_29[grid(262144)](buf257,
buf258, buf259, primals_114, primals_115, buf262, 262144,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_115
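        # Final stage at 2x2: stride-2 projection primals_116 to 8192
        # channels, with 2048-channel bottlenecks (primals_117 onward).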
buf263 = extern_kernels.convolution(buf262, primals_116, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf263, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf264 = extern_kernels.convolution(buf262, primals_117, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf264, (4, 2048, 4, 4), (32768, 1, 8192, 2048))
buf265 = buf259
del buf259
buf266 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf268 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_31[grid(128)](buf264, buf265,
buf266, buf268, 128, 1024, num_warps=8, num_stages=1)
buf269 = empty_strided_cuda((4, 2048, 4, 4), (32768, 1, 8192, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_32[grid(131072)](buf264,
buf265, buf266, primals_118, primals_119, buf269, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_119
buf270 = extern_kernels.convolution(buf269, buf14, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf270, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf271 = buf266
del buf266
buf272 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf274 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_33[grid(128)](buf270, buf271,
buf272, buf274, 128, 256, num_warps=2, num_stages=1)
buf275 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_34[grid(32768)](buf270,
buf271, buf272, primals_121, primals_122, buf275, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_122
buf276 = extern_kernels.convolution(buf275, primals_123, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf276, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf277 = buf263
del buf263
triton_poi_fused_add_35[grid(131072)](buf277, buf276, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf278 = buf272
del buf272
buf279 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf281 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_36[grid(128)](buf277, buf278,
buf279, buf281, 128, 1024, num_warps=8, num_stages=1)
buf282 = buf276
del buf276
triton_poi_fused_native_group_norm_relu_37[grid(131072)](buf277,
buf278, buf279, primals_124, primals_125, buf282, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_125
buf283 = extern_kernels.convolution(buf282, primals_126, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf283, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf284 = buf279
del buf279
buf285 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf287 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_33[grid(128)](buf283, buf284,
buf285, buf287, 128, 256, num_warps=2, num_stages=1)
buf288 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_34[grid(32768)](buf283,
buf284, buf285, primals_127, primals_128, buf288, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_128
buf289 = extern_kernels.convolution(buf288, buf15, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf289, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf290 = buf285
del buf285
buf291 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf293 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_33[grid(128)](buf289, buf290,
buf291, buf293, 128, 256, num_warps=2, num_stages=1)
buf294 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_34[grid(32768)](buf289,
buf290, buf291, primals_130, primals_131, buf294, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_131
buf295 = extern_kernels.convolution(buf294, primals_132, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf295, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf296 = buf295
del buf295
triton_poi_fused_add_38[grid(131072)](buf296, buf277, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf297 = buf291
del buf291
buf298 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_36[grid(128)](buf296, buf297,
buf298, buf300, 128, 1024, num_warps=8, num_stages=1)
buf301 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192
), torch.float32)
triton_poi_fused_native_group_norm_relu_37[grid(131072)](buf296,
buf297, buf298, primals_133, primals_134, buf301, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_134
buf302 = extern_kernels.convolution(buf301, primals_135, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf302, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf303 = buf298
del buf298
buf304 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf306 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_33[grid(128)](buf302, buf303,
buf304, buf306, 128, 256, num_warps=2, num_stages=1)
buf307 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_34[grid(32768)](buf302,
buf303, buf304, primals_136, primals_137, buf307, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_137
buf308 = extern_kernels.convolution(buf307, buf16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf308, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf309 = buf304
del buf304
buf310 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf312 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_33[grid(128)](buf308, buf309,
buf310, buf312, 128, 256, num_warps=2, num_stages=1)
buf313 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_34[grid(32768)](buf308,
buf309, buf310, primals_139, primals_140, buf313, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_140
buf314 = extern_kernels.convolution(buf313, primals_141, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf314, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf315 = buf314
del buf314
triton_poi_fused_add_38[grid(131072)](buf315, buf296, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf316 = buf310
del buf310
buf317 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf319 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_36[grid(128)](buf315, buf316,
buf317, buf319, 128, 1024, num_warps=8, num_stages=1)
buf320 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192
), torch.float32)
triton_poi_fused_native_group_norm_relu_37[grid(131072)](buf315,
buf316, buf317, primals_142, primals_143, buf320, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_143
buf321 = extern_kernels.convolution(buf320, primals_144, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf321, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf322 = buf317
del buf317
buf323 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf325 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_33[grid(128)](buf321, buf322,
buf323, buf325, 128, 256, num_warps=2, num_stages=1)
buf326 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_34[grid(32768)](buf321,
buf322, buf323, primals_145, primals_146, buf326, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_146
buf327 = extern_kernels.convolution(buf326, buf17, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf327, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf328 = buf323
del buf323
buf329 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf331 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_33[grid(128)](buf327, buf328,
buf329, buf331, 128, 256, num_warps=2, num_stages=1)
buf332 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_34[grid(32768)](buf327,
buf328, buf329, primals_148, primals_149, buf332, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_149
buf333 = extern_kernels.convolution(buf332, primals_150, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf333, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf334 = buf333
del buf333
triton_poi_fused_add_38[grid(131072)](buf334, buf315, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
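        # Head: one last GroupNorm+ReLU fused with a global average pool down
        # to 1x1 (triton_poi_fused_mean_native_group_norm_relu_40), then the
        # 1x1 classifier convolution over 21843 classes (primals_153); the
        # final pointwise kernel adds the classifier bias (primals_154).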
buf335 = reinterpret_tensor(buf329, (4, 32, 1, 1), (32, 1, 32, 32), 0)
del buf329
buf336 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf338 = reinterpret_tensor(buf336, (4, 32, 1, 1), (32, 1, 32, 32), 0)
del buf336
triton_per_fused_native_group_norm_39[grid(128)](buf338, buf334,
buf335, 128, 1024, num_warps=8, num_stages=1)
buf339 = empty_strided_cuda((4, 8192, 1, 1), (8192, 1, 8192, 8192),
torch.float32)
triton_poi_fused_mean_native_group_norm_relu_40[grid(32768)](buf334,
buf335, buf338, primals_151, primals_152, buf339, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
buf340 = extern_kernels.convolution(buf339, primals_153, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf340, (4, 21843, 1, 1), (21843, 1, 21843, 21843))
buf341 = reinterpret_tensor(buf340, (4, 21843, 1, 1), (21843, 1,
87372, 87372), 0)
del buf340
triton_poi_fused_convolution_41[grid(87372)](buf341, primals_154,
87372, XBLOCK=512, num_warps=8, num_stages=1)
del primals_154
return (reinterpret_tensor(buf341, (4, 21843), (21843, 1), 0), buf0,
buf1, primals_3, primals_5, primals_6, primals_7, buf2, primals_10,
primals_12, primals_13, primals_15, primals_16, buf3, primals_19,
primals_21, primals_22, primals_24, primals_25, buf4, primals_28,
primals_30, primals_31, primals_33, primals_34, buf5, primals_37,
primals_39, primals_40, primals_42, primals_43, primals_44, buf6,
primals_47, primals_49, primals_50, primals_52, primals_53, buf7,
primals_56, primals_58, primals_59, primals_61, primals_62, buf8,
primals_65, primals_67, primals_68, primals_70, primals_71, buf9,
primals_74, primals_76, primals_77, primals_79, primals_80,
primals_81, buf10, primals_84, primals_86, primals_87, primals_89,
primals_90, buf11, primals_93, primals_95, primals_96, primals_98,
primals_99, buf12, primals_102, primals_104, primals_105,
primals_107, primals_108, buf13, primals_111, primals_113,
primals_114, primals_116, primals_117, primals_118, buf14,
primals_121, primals_123, primals_124, primals_126, primals_127,
buf15, primals_130, primals_132, primals_133, primals_135,
primals_136, buf16, primals_139, primals_141, primals_142,
primals_144, primals_145, buf17, primals_148, primals_150,
primals_151, primals_152, primals_153, buf21, buf22, buf24, buf25,
buf26, reinterpret_tensor(buf27, (4, 32), (32, 1), 0),
reinterpret_tensor(buf30, (4, 32), (32, 1), 0), buf31, buf33,
reinterpret_tensor(buf34, (4, 32), (32, 1), 0), reinterpret_tensor(
buf37, (4, 32), (32, 1), 0), buf38, buf39, reinterpret_tensor(buf40,
(4, 32), (32, 1), 0), reinterpret_tensor(buf43, (4, 32), (32, 1), 0
), buf44, buf46, reinterpret_tensor(buf47, (4, 32), (32, 1), 0),
reinterpret_tensor(buf50, (4, 32), (32, 1), 0), buf51, buf52,
reinterpret_tensor(buf53, (4, 32), (32, 1), 0), reinterpret_tensor(
buf56, (4, 32), (32, 1), 0), buf57, buf58, reinterpret_tensor(buf59,
(4, 32), (32, 1), 0), reinterpret_tensor(buf62, (4, 32), (32, 1), 0
), buf63, buf65, reinterpret_tensor(buf66, (4, 32), (32, 1), 0),
reinterpret_tensor(buf69, (4, 32), (32, 1), 0), buf70, buf71,
reinterpret_tensor(buf72, (4, 32), (32, 1), 0), reinterpret_tensor(
buf75, (4, 32), (32, 1), 0), buf76, buf77, reinterpret_tensor(buf78,
(4, 32), (32, 1), 0), reinterpret_tensor(buf81, (4, 32), (32, 1), 0
), buf82, buf84, reinterpret_tensor(buf85, (4, 32), (32, 1), 0),
reinterpret_tensor(buf88, (4, 32), (32, 1), 0), buf89, buf90,
reinterpret_tensor(buf91, (4, 32), (32, 1), 0), reinterpret_tensor(
buf94, (4, 32), (32, 1), 0), buf95, buf96, reinterpret_tensor(buf97,
(4, 32), (32, 1), 0), reinterpret_tensor(buf100, (4, 32), (32, 1),
0), buf101, buf103, reinterpret_tensor(buf104, (4, 32), (32, 1), 0),
reinterpret_tensor(buf107, (4, 32), (32, 1), 0), buf108, buf110,
reinterpret_tensor(buf111, (4, 32), (32, 1), 0), reinterpret_tensor
(buf114, (4, 32), (32, 1), 0), buf115, buf116, reinterpret_tensor(
buf117, (4, 32), (32, 1), 0), reinterpret_tensor(buf120, (4, 32), (
32, 1), 0), buf121, buf123, reinterpret_tensor(buf124, (4, 32), (32,
1), 0), reinterpret_tensor(buf127, (4, 32), (32, 1), 0), buf128,
buf129, reinterpret_tensor(buf130, (4, 32), (32, 1), 0),
reinterpret_tensor(buf133, (4, 32), (32, 1), 0), buf134, buf135,
reinterpret_tensor(buf136, (4, 32), (32, 1), 0), reinterpret_tensor
(buf139, (4, 32), (32, 1), 0), buf140, buf142, reinterpret_tensor(
buf143, (4, 32), (32, 1), 0), reinterpret_tensor(buf146, (4, 32), (
32, 1), 0), buf147, buf148, reinterpret_tensor(buf149, (4, 32), (32,
1), 0), reinterpret_tensor(buf152, (4, 32), (32, 1), 0), buf153,
buf154, reinterpret_tensor(buf155, (4, 32), (32, 1), 0),
reinterpret_tensor(buf158, (4, 32), (32, 1), 0), buf159, buf161,
reinterpret_tensor(buf162, (4, 32), (32, 1), 0), reinterpret_tensor
(buf165, (4, 32), (32, 1), 0), buf166, buf167, reinterpret_tensor(
buf168, (4, 32), (32, 1), 0), reinterpret_tensor(buf171, (4, 32), (
32, 1), 0), buf172, buf173, reinterpret_tensor(buf174, (4, 32), (32,
1), 0), reinterpret_tensor(buf177, (4, 32), (32, 1), 0), buf178,
buf180, reinterpret_tensor(buf181, (4, 32), (32, 1), 0),
reinterpret_tensor(buf184, (4, 32), (32, 1), 0), buf185, buf187,
reinterpret_tensor(buf188, (4, 32), (32, 1), 0), reinterpret_tensor
(buf191, (4, 32), (32, 1), 0), buf192, buf193, reinterpret_tensor(
buf194, (4, 32), (32, 1), 0), reinterpret_tensor(buf197, (4, 32), (
32, 1), 0), buf198, buf200, reinterpret_tensor(buf201, (4, 32), (32,
1), 0), reinterpret_tensor(buf204, (4, 32), (32, 1), 0), buf205,
buf206, reinterpret_tensor(buf207, (4, 32), (32, 1), 0),
reinterpret_tensor(buf210, (4, 32), (32, 1), 0), buf211, buf212,
reinterpret_tensor(buf213, (4, 32), (32, 1), 0), reinterpret_tensor
(buf216, (4, 32), (32, 1), 0), buf217, buf219, reinterpret_tensor(
buf220, (4, 32), (32, 1), 0), reinterpret_tensor(buf223, (4, 32), (
32, 1), 0), buf224, buf225, reinterpret_tensor(buf226, (4, 32), (32,
1), 0), reinterpret_tensor(buf229, (4, 32), (32, 1), 0), buf230,
buf231, reinterpret_tensor(buf232, (4, 32), (32, 1), 0),
reinterpret_tensor(buf235, (4, 32), (32, 1), 0), buf236, buf238,
reinterpret_tensor(buf239, (4, 32), (32, 1), 0), reinterpret_tensor
(buf242, (4, 32), (32, 1), 0), buf243, buf244, reinterpret_tensor(
buf245, (4, 32), (32, 1), 0), reinterpret_tensor(buf248, (4, 32), (
32, 1), 0), buf249, buf250, reinterpret_tensor(buf251, (4, 32), (32,
1), 0), reinterpret_tensor(buf254, (4, 32), (32, 1), 0), buf255,
buf257, reinterpret_tensor(buf258, (4, 32), (32, 1), 0),
reinterpret_tensor(buf261, (4, 32), (32, 1), 0), buf262, buf264,
reinterpret_tensor(buf265, (4, 32), (32, 1), 0), reinterpret_tensor
(buf268, (4, 32), (32, 1), 0), buf269, buf270, reinterpret_tensor(
buf271, (4, 32), (32, 1), 0), reinterpret_tensor(buf274, (4, 32), (
32, 1), 0), buf275, buf277, reinterpret_tensor(buf278, (4, 32), (32,
1), 0), reinterpret_tensor(buf281, (4, 32), (32, 1), 0), buf282,
buf283, reinterpret_tensor(buf284, (4, 32), (32, 1), 0),
reinterpret_tensor(buf287, (4, 32), (32, 1), 0), buf288, buf289,
reinterpret_tensor(buf290, (4, 32), (32, 1), 0), reinterpret_tensor
(buf293, (4, 32), (32, 1), 0), buf294, buf296, reinterpret_tensor(
buf297, (4, 32), (32, 1), 0), reinterpret_tensor(buf300, (4, 32), (
32, 1), 0), buf301, buf302, reinterpret_tensor(buf303, (4, 32), (32,
1), 0), reinterpret_tensor(buf306, (4, 32), (32, 1), 0), buf307,
buf308, reinterpret_tensor(buf309, (4, 32), (32, 1), 0),
reinterpret_tensor(buf312, (4, 32), (32, 1), 0), buf313, buf315,
reinterpret_tensor(buf316, (4, 32), (32, 1), 0), reinterpret_tensor
(buf319, (4, 32), (32, 1), 0), buf320, buf321, reinterpret_tensor(
buf322, (4, 32), (32, 1), 0), reinterpret_tensor(buf325, (4, 32), (
32, 1), 0), buf326, buf327, reinterpret_tensor(buf328, (4, 32), (32,
1), 0), reinterpret_tensor(buf331, (4, 32), (32, 1), 0), buf332,
buf334, buf335, buf338, buf339)
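# Added note: the tuple returned above bundles the graph output with the
# parameters and every intermediate buffer the autograd backward pass needs;
# the reinterpret_tensor(..., (4, 32), (32, 1), 0) calls are zero-copy views
# that flatten buffers which, given their (4, 32) shape and the 32-group
# GroupNorm layers, appear to be per-(sample, group) normalization statistics.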
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
bias=False)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
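# Illustration (added, not in the source repo): tf2th converts TensorFlow's
# HWIO kernel layout to PyTorch's OIHW layout via transpose([3, 2, 0, 1]).
# A minimal sketch, assuming numpy is imported as np:
#   k_tf = np.zeros((7, 7, 3, 64), dtype=np.float32)  # (H, W, I, O)
#   k_th = tf2th(k_tf)                                # (64, 3, 7, 7) = (O, I, H, W)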
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
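# Added commentary: StdConv2d implements Weight Standardization -- each output
# filter is shifted and scaled to zero mean and unit variance over its
# (in_channels, kH, kW) extent before convolving, i.e.
#   w_hat = (w - mean(w)) / sqrt(var(w, unbiased=False) + 1e-10)
# while self.weight itself keeps the raw, unstandardized parameters.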
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
Except it puts the stride on the 3x3 conv when available.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cin)
self.conv1 = conv1x1(cin, cmid)
self.gn2 = nn.GroupNorm(32, cmid)
self.conv2 = conv3x3(cmid, cmid, stride)
self.gn3 = nn.GroupNorm(32, cmid)
self.conv3 = conv1x1(cmid, cout)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride)
def forward(self, x):
out = self.relu(self.gn1(x))
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(out)
out = self.conv1(out)
out = self.conv2(self.relu(self.gn2(out)))
out = self.conv3(self.relu(self.gn3(out)))
return out + residual
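    # Usage sketch (illustrative, not from the source repo): with stride == 1
    # and cin == cout the block is a shape-preserving residual unit, e.g.
    #   blk = PreActBottleneck(cin=256, cout=256, cmid=64)
    #   y = blk(torch.randn(1, 256, 16, 16))  # -> (1, 256, 16, 16)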
def load_from(self, weights, prefix=''):
convname = 'standardized_conv2d'
with torch.no_grad():
self.conv1.weight.copy_(tf2th(weights[
f'{prefix}a/{convname}/kernel']))
self.conv2.weight.copy_(tf2th(weights[
f'{prefix}b/{convname}/kernel']))
self.conv3.weight.copy_(tf2th(weights[
f'{prefix}c/{convname}/kernel']))
self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma'])
)
self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma'])
)
self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma'])
)
self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
if hasattr(self, 'downsample'):
w = weights[f'{prefix}a/proj/{convname}/kernel']
self.downsample.weight.copy_(tf2th(w))
class ResNetV2New(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor, head_size=21843,
zero_head=False):
super().__init__()
wf = width_factor
self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, 64 *
wf, kernel_size=7, stride=2, padding=3, bias=False)), ('pad',
nn.ConstantPad2d(1, 0)), ('pool', nn.MaxPool2d(kernel_size=3,
stride=2, padding=0))]))
self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(
OrderedDict([('unit01', PreActBottleneck(cin=64 * wf, cout=256 *
wf, cmid=64 * wf))] + [(f'unit{i:02d}', PreActBottleneck(cin=
256 * wf, cout=256 * wf, cmid=64 * wf)) for i in range(2,
block_units[0] + 1)]))), ('block2', nn.Sequential(OrderedDict([
('unit01', PreActBottleneck(cin=256 * wf, cout=512 * wf, cmid=
128 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=
512 * wf, cout=512 * wf, cmid=128 * wf)) for i in range(2,
block_units[1] + 1)]))), ('block3', nn.Sequential(OrderedDict([
('unit01', PreActBottleneck(cin=512 * wf, cout=1024 * wf, cmid=
256 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=
1024 * wf, cout=1024 * wf, cmid=256 * wf)) for i in range(2,
block_units[2] + 1)]))), ('block4', nn.Sequential(OrderedDict([
('unit01', PreActBottleneck(cin=1024 * wf, cout=2048 * wf, cmid
=512 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin
=2048 * wf, cout=2048 * wf, cmid=512 * wf)) for i in range(2,
block_units[3] + 1)])))]))
self.zero_head = zero_head
self.head = nn.Sequential(OrderedDict([('gn', nn.GroupNorm(32, 2048 *
wf)), ('relu', nn.ReLU(inplace=True)), ('avg', nn.
AdaptiveAvgPool2d(output_size=1)), ('conv', nn.Conv2d(2048 * wf,
head_size, kernel_size=1, bias=True))]))
def load_from(self, weights, prefix='resnet/'):
with torch.no_grad():
self.root.conv.weight.copy_(tf2th(weights[
f'{prefix}root_block/standardized_conv2d/kernel']))
self.head.gn.weight.copy_(tf2th(weights[
f'{prefix}group_norm/gamma']))
self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
if self.zero_head:
nn.init.zeros_(self.head.conv.weight)
nn.init.zeros_(self.head.conv.bias)
else:
self.head.conv.weight.copy_(tf2th(weights[
f'{prefix}head/conv2d/kernel']))
self.head.conv.bias.copy_(tf2th(weights[
f'{prefix}head/conv2d/bias']))
for bname, block in self.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')
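    # Added note: the primals_* numbering in forward() below appears to follow
    # the order in which the tracer recorded the parameters rather than module
    # order -- e.g. primals_5 maps to conv3.weight while primals_12 maps to the
    # downsample weight -- so the shuffled-looking assignments are intentional.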
def forward(self, input_0):
primals_1 = self.root.conv.weight
primals_3 = self.body.block1.unit01.gn1.weight
primals_4 = self.body.block1.unit01.gn1.bias
primals_6 = self.body.block1.unit01.conv1.weight
primals_7 = self.body.block1.unit01.gn2.weight
primals_8 = self.body.block1.unit01.gn2.bias
primals_9 = self.body.block1.unit01.conv2.weight
primals_10 = self.body.block1.unit01.gn3.weight
primals_11 = self.body.block1.unit01.gn3.bias
primals_5 = self.body.block1.unit01.conv3.weight
primals_12 = self.body.block1.unit01.downsample.weight
primals_13 = self.body.block1.unit02.gn1.weight
primals_14 = self.body.block1.unit02.gn1.bias
primals_15 = self.body.block1.unit02.conv1.weight
primals_16 = self.body.block1.unit02.gn2.weight
primals_17 = self.body.block1.unit02.gn2.bias
primals_18 = self.body.block1.unit02.conv2.weight
primals_19 = self.body.block1.unit02.gn3.weight
primals_20 = self.body.block1.unit02.gn3.bias
primals_21 = self.body.block1.unit02.conv3.weight
primals_22 = self.body.block1.unit03.gn1.weight
primals_23 = self.body.block1.unit03.gn1.bias
primals_24 = self.body.block1.unit03.conv1.weight
primals_25 = self.body.block1.unit03.gn2.weight
primals_26 = self.body.block1.unit03.gn2.bias
primals_27 = self.body.block1.unit03.conv2.weight
primals_28 = self.body.block1.unit03.gn3.weight
primals_29 = self.body.block1.unit03.gn3.bias
primals_30 = self.body.block1.unit03.conv3.weight
primals_31 = self.body.block1.unit04.gn1.weight
primals_32 = self.body.block1.unit04.gn1.bias
primals_33 = self.body.block1.unit04.conv1.weight
primals_34 = self.body.block1.unit04.gn2.weight
primals_35 = self.body.block1.unit04.gn2.bias
primals_36 = self.body.block1.unit04.conv2.weight
primals_37 = self.body.block1.unit04.gn3.weight
primals_38 = self.body.block1.unit04.gn3.bias
primals_39 = self.body.block1.unit04.conv3.weight
primals_40 = self.body.block2.unit01.gn1.weight
primals_41 = self.body.block2.unit01.gn1.bias
primals_43 = self.body.block2.unit01.conv1.weight
primals_44 = self.body.block2.unit01.gn2.weight
primals_45 = self.body.block2.unit01.gn2.bias
primals_46 = self.body.block2.unit01.conv2.weight
primals_47 = self.body.block2.unit01.gn3.weight
primals_48 = self.body.block2.unit01.gn3.bias
primals_49 = self.body.block2.unit01.conv3.weight
primals_42 = self.body.block2.unit01.downsample.weight
primals_50 = self.body.block2.unit02.gn1.weight
primals_51 = self.body.block2.unit02.gn1.bias
primals_52 = self.body.block2.unit02.conv1.weight
primals_53 = self.body.block2.unit02.gn2.weight
primals_54 = self.body.block2.unit02.gn2.bias
primals_55 = self.body.block2.unit02.conv2.weight
primals_56 = self.body.block2.unit02.gn3.weight
primals_57 = self.body.block2.unit02.gn3.bias
primals_58 = self.body.block2.unit02.conv3.weight
primals_59 = self.body.block2.unit03.gn1.weight
primals_60 = self.body.block2.unit03.gn1.bias
primals_61 = self.body.block2.unit03.conv1.weight
primals_62 = self.body.block2.unit03.gn2.weight
primals_63 = self.body.block2.unit03.gn2.bias
primals_64 = self.body.block2.unit03.conv2.weight
primals_65 = self.body.block2.unit03.gn3.weight
primals_66 = self.body.block2.unit03.gn3.bias
primals_67 = self.body.block2.unit03.conv3.weight
primals_68 = self.body.block2.unit04.gn1.weight
primals_69 = self.body.block2.unit04.gn1.bias
primals_70 = self.body.block2.unit04.conv1.weight
primals_71 = self.body.block2.unit04.gn2.weight
primals_72 = self.body.block2.unit04.gn2.bias
primals_73 = self.body.block2.unit04.conv2.weight
primals_74 = self.body.block2.unit04.gn3.weight
primals_75 = self.body.block2.unit04.gn3.bias
primals_76 = self.body.block2.unit04.conv3.weight
primals_77 = self.body.block3.unit01.gn1.weight
primals_78 = self.body.block3.unit01.gn1.bias
primals_80 = self.body.block3.unit01.conv1.weight
primals_81 = self.body.block3.unit01.gn2.weight
primals_82 = self.body.block3.unit01.gn2.bias
primals_83 = self.body.block3.unit01.conv2.weight
primals_84 = self.body.block3.unit01.gn3.weight
primals_85 = self.body.block3.unit01.gn3.bias
primals_86 = self.body.block3.unit01.conv3.weight
primals_79 = self.body.block3.unit01.downsample.weight
primals_87 = self.body.block3.unit02.gn1.weight
primals_88 = self.body.block3.unit02.gn1.bias
primals_89 = self.body.block3.unit02.conv1.weight
primals_90 = self.body.block3.unit02.gn2.weight
primals_91 = self.body.block3.unit02.gn2.bias
primals_92 = self.body.block3.unit02.conv2.weight
primals_93 = self.body.block3.unit02.gn3.weight
primals_94 = self.body.block3.unit02.gn3.bias
primals_95 = self.body.block3.unit02.conv3.weight
primals_96 = self.body.block3.unit03.gn1.weight
primals_97 = self.body.block3.unit03.gn1.bias
primals_98 = self.body.block3.unit03.conv1.weight
primals_99 = self.body.block3.unit03.gn2.weight
primals_100 = self.body.block3.unit03.gn2.bias
primals_101 = self.body.block3.unit03.conv2.weight
primals_102 = self.body.block3.unit03.gn3.weight
primals_103 = self.body.block3.unit03.gn3.bias
primals_104 = self.body.block3.unit03.conv3.weight
primals_105 = self.body.block3.unit04.gn1.weight
primals_106 = self.body.block3.unit04.gn1.bias
primals_107 = self.body.block3.unit04.conv1.weight
primals_108 = self.body.block3.unit04.gn2.weight
primals_109 = self.body.block3.unit04.gn2.bias
primals_110 = self.body.block3.unit04.conv2.weight
primals_111 = self.body.block3.unit04.gn3.weight
primals_112 = self.body.block3.unit04.gn3.bias
primals_113 = self.body.block3.unit04.conv3.weight
primals_114 = self.body.block4.unit01.gn1.weight
primals_115 = self.body.block4.unit01.gn1.bias
primals_117 = self.body.block4.unit01.conv1.weight
primals_118 = self.body.block4.unit01.gn2.weight
primals_119 = self.body.block4.unit01.gn2.bias
primals_120 = self.body.block4.unit01.conv2.weight
primals_121 = self.body.block4.unit01.gn3.weight
primals_122 = self.body.block4.unit01.gn3.bias
primals_123 = self.body.block4.unit01.conv3.weight
primals_116 = self.body.block4.unit01.downsample.weight
primals_124 = self.body.block4.unit02.gn1.weight
primals_125 = self.body.block4.unit02.gn1.bias
primals_126 = self.body.block4.unit02.conv1.weight
primals_127 = self.body.block4.unit02.gn2.weight
primals_128 = self.body.block4.unit02.gn2.bias
primals_129 = self.body.block4.unit02.conv2.weight
primals_130 = self.body.block4.unit02.gn3.weight
primals_131 = self.body.block4.unit02.gn3.bias
primals_132 = self.body.block4.unit02.conv3.weight
primals_133 = self.body.block4.unit03.gn1.weight
primals_134 = self.body.block4.unit03.gn1.bias
primals_135 = self.body.block4.unit03.conv1.weight
primals_136 = self.body.block4.unit03.gn2.weight
primals_137 = self.body.block4.unit03.gn2.bias
primals_138 = self.body.block4.unit03.conv2.weight
primals_139 = self.body.block4.unit03.gn3.weight
primals_140 = self.body.block4.unit03.gn3.bias
primals_141 = self.body.block4.unit03.conv3.weight
primals_142 = self.body.block4.unit04.gn1.weight
primals_143 = self.body.block4.unit04.gn1.bias
primals_144 = self.body.block4.unit04.conv1.weight
primals_145 = self.body.block4.unit04.gn2.weight
primals_146 = self.body.block4.unit04.gn2.bias
primals_147 = self.body.block4.unit04.conv2.weight
primals_148 = self.body.block4.unit04.gn3.weight
primals_149 = self.body.block4.unit04.gn3.bias
primals_150 = self.body.block4.unit04.conv3.weight
primals_151 = self.head.gn.weight
primals_152 = self.head.gn.bias
primals_153 = self.head.conv.weight
primals_154 = self.head.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102, primals_103, primals_104,
primals_105, primals_106, primals_107, primals_108, primals_109,
primals_110, primals_111, primals_112, primals_113, primals_114,
primals_115, primals_116, primals_117, primals_118, primals_119,
primals_120, primals_121, primals_122, primals_123, primals_124,
primals_125, primals_126, primals_127, primals_128, primals_129,
primals_130, primals_131, primals_132, primals_133, primals_134,
primals_135, primals_136, primals_137, primals_138, primals_139,
primals_140, primals_141, primals_142, primals_143, primals_144,
primals_145, primals_146, primals_147, primals_148, primals_149,
primals_150, primals_151, primals_152, primals_153, primals_154])
return output[0]
| bethgelab/robustness | ResNetV2 | false | 16,458 | [
"Apache-2.0"
] | 67 | aa0a6798fe3973bae5f47561721b59b39f126ab7 | https://github.com/bethgelab/robustness/tree/aa0a6798fe3973bae5f47561721b59b39f126ab7 |
MLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xl/cxlq33nvqoe4qybjyopeuxrddy5uzid5fs5kzfhjq367a6fsdj3s.py
# Topologically Sorted Source Nodes: [conv2d, hardtanh], Original ATen: [aten.convolution, aten.hardtanh, aten.hardtanh_backward]
# Source node to ATen node mapping:
# conv2d => convolution
# hardtanh => clamp_max, clamp_min
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%convolution, 0.0), kwargs = {})
# %clamp_max : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6.0), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%convolution, 0.0), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%convolution, 6.0), kwargs = {})
# %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le, %ge), kwargs = {})
triton_poi_fused_convolution_hardtanh_hardtanh_backward_0 = async_compile.triton('triton_poi_fused_convolution_hardtanh_hardtanh_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_hardtanh_hardtanh_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_hardtanh_hardtanh_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp2 <= tmp3
tmp8 = tmp2 >= tmp5
tmp9 = tmp7 | tmp8
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp9, xmask)
''', device_str='cuda')
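# Added note: the fused kernel above folds the conv bias add into ReLU6
# (hardtanh clamped to [0.0, 6.0]) and simultaneously writes the boolean
# saturation mask (x <= 0 or x >= 6) that hardtanh_backward later uses to
# zero the incoming gradient.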
# kernel path: runs/run_shard_0/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%clamp_max, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, hardtanh], Original ATen: [aten.convolution, aten.hardtanh, aten.hardtanh_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_hardtanh_hardtanh_backward_0.run(buf0, primals_2, buf1, buf4, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, primals_1, primals_3, primals_4, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch import nn
class MLP(nn.Module):
def __init__(self, dim, hidden_dim, out_dim=None) ->None:
super().__init__()
out_dim = out_dim or dim
self.fc1 = nn.Conv2d(dim, hidden_dim, 1, 1, 0)
self.act = nn.ReLU6(True)
self.fc2 = nn.Conv2d(hidden_dim, out_dim, 1, 1, 0)
def forward(self, x: 'Tensor') ->Tensor:
return self.fc2(self.act(self.fc1(x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'hidden_dim': 4}]
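# Usage sketch (added for illustration): the MLP is built from 1x1 convs, so
# it mixes channels while preserving spatial dimensions:
#   mlp = MLP(dim=4, hidden_dim=4)
#   out = mlp(torch.rand(4, 4, 4, 4))  # -> torch.Size([4, 4, 4, 4])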
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_hardtanh_hardtanh_backward_0(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp2 <= tmp3
tmp8 = tmp2 >= tmp5
tmp9 = tmp7 | tmp8
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
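# Added note: this second kernel only folds the per-channel bias into the conv
# output in place; for the (4, 4, 4, 4) NCHW tensor there are 16 = H*W elements
# per channel, so x1 = xindex // 16 % 4 recovers the channel index used to
# gather the bias value.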
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_hardtanh_hardtanh_backward_0[grid(256)](
buf0, primals_2, buf1, buf4, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf4
class MLPNew(nn.Module):
def __init__(self, dim, hidden_dim, out_dim=None) ->None:
super().__init__()
out_dim = out_dim or dim
self.fc1 = nn.Conv2d(dim, hidden_dim, 1, 1, 0)
self.act = nn.ReLU6(True)
self.fc2 = nn.Conv2d(hidden_dim, out_dim, 1, 1, 0)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
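# Added note: MLPNew mirrors MLP but routes forward() through the compiled
# call() wrapper above instead of the eager fc2(act(fc1(x))) chain, passing
# the weights, biases and input in the positional primals_* order the
# Inductor graph expects.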
| sithu31296/image_classification | MLP | false | 16,459 | [
"MIT"
] | 57 | 6b8cbce96100225621cee3166a73e852ba216cc3 | https://github.com/sithu31296/image_classification/tree/6b8cbce96100225621cee3166a73e852ba216cc3 |
BodyPoseModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_1 => convolution
# input_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
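# Added note: the kernel above folds the per-channel conv bias into the output
# and applies ReLU as maximum(0, x) in one pass; the same bias-plus-ReLU
# pattern repeats in kernels 2, 4, 6, 7 and 8 below at different tensor sizes.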
# kernel path: runs/run_shard_0/inductor_cache/7z/c7zuih2ysjtir5rh5seep5ijnhokjlgkyjw2edhf257ahvz4iipr.py
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_5 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
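# Added commentary: the kernel above is a 2x2 / stride-2 max-pool over rows of
# width 64: for output column x0 it reads input columns 2*x0 and 2*x0 + 1 on
# two adjacent rows (offsets 0/1 and 64/65), keeps the maximum, and records
# which of the four taps won as an int8 index in {0, 1, 2, 3} for the
# backward pass.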
# kernel path: runs/run_shard_0/inductor_cache/xq/cxqz2dr7nh2qabrtemj52pazmhrknj5ltcy32ka252ia6a3jgpqi.py
# Topologically Sorted Source Nodes: [input_6, input_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_6 => convolution_2
# input_7 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pr/cpri5daxkfbmt5ostbhb5o2avircr64a2rmdkxfackaxyjfc7owe.py
# Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_10 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/of/cof37d5wbqzvtkioj7k4me7wqpvfv55rs62ytonj7gij2o3abnod.py
# Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_11 => convolution_4
# input_12 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mn/cmnzsv2cdbsuq2sygridqvwumzmcvknuthlumel5m25l2ajsr4ft.py
# Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_19 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ic/cicsjqc3cfcjzqlztx4hz7ssqwe47ngo3g2onc6463k3vgfmt5cw.py
# Topologically Sorted Source Nodes: [input_20, input_21], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_20 => convolution_8
# input_21 => relu_8
# Graph fragment:
# %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rs/crsb2j7t6kjc2dizrgavde3h3rerob3nhf7iqux6o24562lkvvoe.py
# Topologically Sorted Source Nodes: [input_24, input_25], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_24 => convolution_10
# input_25 => relu_10
# Graph fragment:
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_9, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qy/cqyis4pzdzl2zcpdenz7kfyw4uxhak4ugnkkhusp7xtxj4qytdez.py
# Topologically Sorted Source Nodes: [input_26, input_27], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_26 => convolution_11
# input_27 => relu_11
# Graph fragment:
# %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_11 : [num_users=8] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
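
# NOTE (annotation, not part of the generated output): triton_poi_fused_convolution_relu_7
# and triton_poi_fused_convolution_relu_8 above differ only in element count and
# channel width; each fuses the bias add of the preceding bias-free extern
# convolution with the ReLU that follows, updating the conv output in place.
# A minimal eager-mode sketch of the same computation, assuming an (N, C, H, W)
# convolution output and a (C,) bias:
def _reference_fused_bias_relu(conv_out, bias):
    import torch
    # Broadcast the per-channel bias, then clamp negatives to zero, mirroring
    # the kernel's maximum(0, tmp0 + tmp1). The kernel writes in place; this
    # sketch returns a new tensor for clarity.
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))
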
# kernel path: runs/run_shard_0/inductor_cache/xh/cxh5qnz7467zwx7kksukcyl5yqbimdzz2jusq6gmtz3v7ngsbddj.py
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out2 => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_16, %convolution_21, %relu_11], 1), kwargs = {})
triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 47360
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
    x1 = (xindex // 64) % 185  # output channel in the 185-channel concatenation
    x0 = xindex % 64  # spatial offset within an 8x8 plane
    x2 = (xindex // 11840)  # batch index (185 channels * 64 pixels per sample)
    x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 38, tl.int64)
    tmp4 = tmp0 < tmp3  # channels [0, 38): first head output + its bias
tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (2432*x2)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 57, tl.int64)
tmp12 = tmp0 < tmp11
    tmp13 = tmp10 & tmp12  # channels [38, 57): second head output + its bias
tmp14 = tl.load(in_ptr2 + (x0 + (64*((-38) + x1)) + (1216*x2)), tmp13 & xmask, other=0.0)
tmp15 = tl.load(in_ptr3 + ((-38) + x1), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 185, tl.int64)
tmp21 = tmp0 < tmp20
    tmp22 = tl.load(in_ptr4 + (x0 + (64*((-57) + x1)) + (8192*x2)), tmp19 & xmask, other=0.0)  # channels [57, 185): backbone features copied through
tmp23 = tl.where(tmp13, tmp18, tmp22)
tmp24 = tl.where(tmp4, tmp9, tmp23)
tl.store(out_ptr0 + (x3), tmp24, xmask)
''', device_str='cuda')
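
# NOTE (annotation): triton_poi_fused_cat_9 assembles the 185-channel stage
# input in a single pass over the output: channels [0, 38) take the first head
# plus its deferred bias (in_ptr0/in_ptr1), channels [38, 57) take the second
# head plus its bias (in_ptr2/in_ptr3), and channels [57, 185) copy the 128
# backbone feature channels (in_ptr4) through unchanged. A minimal eager-mode
# sketch, assuming (N, 38, H, W), (N, 19, H, W) and (N, 128, H, W) inputs:
def _reference_fused_cat(head38, bias38, head19, bias19, features):
    import torch
    return torch.cat([head38 + bias38.view(1, -1, 1, 1),
                      head19 + bias19.view(1, -1, 1, 1),
                      features], dim=1)
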
# kernel path: runs/run_shard_0/inductor_cache/4y/c4y6uyvhmsek266ivpsqvnnoksgofgtz3h3rggm6nksziikdh57s.py
# Topologically Sorted Source Nodes: [input_162], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# input_162 => convolution_84
# Graph fragment:
# %convolution_84 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_73, %primals_170, %primals_171, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 9728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64) % 38
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
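
# NOTE (annotation): triton_poi_fused_convolution_10 is the activation-free
# variant of the pattern above, a plain per-channel bias add used for a final
# 38-channel head. Eager-mode sketch:
def _reference_bias_add(conv_out, bias):
    return conv_out + bias.view(1, -1, 1, 1)
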
# kernel path: runs/run_shard_0/inductor_cache/ce/ccekwplfoihswfouqfjqcwmfd2cg37pkjpmovikpxyaqzec4g3iq.py
# Topologically Sorted Source Nodes: [input_175, input_176], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# input_175 => convolution_91
# input_176 => relu_80
# Graph fragment:
# %convolution_91 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_79, %primals_184, %primals_185, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_80 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_91,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_80, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
    x4 = xindex
    x1 = (xindex // 64) % 19  # channel index for the bias load
    x2 = (xindex // 1216)  # batch index (19 channels * 64 pixels per sample)
    x3 = xindex % 1216  # per-sample offset into the mask buffer
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # True where the ReLU clamped; consumed by threshold_backward
    tl.store(in_out_ptr0 + (x4), tmp4, xmask)
    tl.store(out_ptr0 + (x3 + (1280*x2)), tmp6, xmask)  # mask buffer uses a per-sample stride of 1280
''', device_str='cuda')
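
# NOTE (annotation): triton_poi_fused_convolution_relu_threshold_backward_11
# fuses the last head's bias add and ReLU with computing the boolean mask
# (relu_out <= 0) that autograd's threshold_backward later uses to zero
# gradients. Eager-mode sketch, assuming an (N, 19, H, W) conv output:
def _reference_fused_relu_with_backward_mask(conv_out, bias):
    import torch
    out = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    return out, out <= 0  # mask of positions where the ReLU was inactive
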
async_compile.wait(globals())
del async_compile


def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256, ), (1, ))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
assert_size_stride(primals_22, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (256, ), (1, ))
assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_25, (128, ), (1, ))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128, ), (1, ))
assert_size_stride(primals_28, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_29, (128, ), (1, ))
assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (128, ), (1, ))
assert_size_stride(primals_32, (512, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_33, (512, ), (1, ))
assert_size_stride(primals_34, (38, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_35, (38, ), (1, ))
assert_size_stride(primals_36, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_37, (128, ), (1, ))
assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (128, ), (1, ))
assert_size_stride(primals_40, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (128, ), (1, ))
assert_size_stride(primals_42, (512, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_43, (512, ), (1, ))
assert_size_stride(primals_44, (19, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_45, (19, ), (1, ))
assert_size_stride(primals_46, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_47, (128, ), (1, ))
assert_size_stride(primals_48, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_49, (128, ), (1, ))
assert_size_stride(primals_50, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_51, (128, ), (1, ))
assert_size_stride(primals_52, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_53, (128, ), (1, ))
assert_size_stride(primals_54, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_55, (128, ), (1, ))
assert_size_stride(primals_56, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_57, (128, ), (1, ))
assert_size_stride(primals_58, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_59, (38, ), (1, ))
assert_size_stride(primals_60, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_61, (128, ), (1, ))
assert_size_stride(primals_62, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_63, (128, ), (1, ))
assert_size_stride(primals_64, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_65, (128, ), (1, ))
assert_size_stride(primals_66, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_67, (128, ), (1, ))
assert_size_stride(primals_68, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_69, (128, ), (1, ))
assert_size_stride(primals_70, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_71, (128, ), (1, ))
assert_size_stride(primals_72, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_73, (19, ), (1, ))
assert_size_stride(primals_74, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_75, (128, ), (1, ))
assert_size_stride(primals_76, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_77, (128, ), (1, ))
assert_size_stride(primals_78, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_79, (128, ), (1, ))
assert_size_stride(primals_80, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_81, (128, ), (1, ))
assert_size_stride(primals_82, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_83, (128, ), (1, ))
assert_size_stride(primals_84, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_85, (128, ), (1, ))
assert_size_stride(primals_86, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_87, (38, ), (1, ))
assert_size_stride(primals_88, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_89, (128, ), (1, ))
assert_size_stride(primals_90, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_91, (128, ), (1, ))
assert_size_stride(primals_92, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_93, (128, ), (1, ))
assert_size_stride(primals_94, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_95, (128, ), (1, ))
assert_size_stride(primals_96, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_97, (128, ), (1, ))
assert_size_stride(primals_98, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_99, (128, ), (1, ))
assert_size_stride(primals_100, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_101, (19, ), (1, ))
assert_size_stride(primals_102, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_103, (128, ), (1, ))
assert_size_stride(primals_104, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_105, (128, ), (1, ))
assert_size_stride(primals_106, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_107, (128, ), (1, ))
assert_size_stride(primals_108, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_109, (128, ), (1, ))
assert_size_stride(primals_110, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_111, (128, ), (1, ))
assert_size_stride(primals_112, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_113, (128, ), (1, ))
assert_size_stride(primals_114, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_115, (38, ), (1, ))
assert_size_stride(primals_116, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_117, (128, ), (1, ))
assert_size_stride(primals_118, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_119, (128, ), (1, ))
assert_size_stride(primals_120, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_121, (128, ), (1, ))
assert_size_stride(primals_122, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_123, (128, ), (1, ))
assert_size_stride(primals_124, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_125, (128, ), (1, ))
assert_size_stride(primals_126, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_127, (128, ), (1, ))
assert_size_stride(primals_128, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_129, (19, ), (1, ))
assert_size_stride(primals_130, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_131, (128, ), (1, ))
assert_size_stride(primals_132, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_133, (128, ), (1, ))
assert_size_stride(primals_134, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_135, (128, ), (1, ))
assert_size_stride(primals_136, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_137, (128, ), (1, ))
assert_size_stride(primals_138, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_139, (128, ), (1, ))
assert_size_stride(primals_140, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_141, (128, ), (1, ))
assert_size_stride(primals_142, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_143, (38, ), (1, ))
assert_size_stride(primals_144, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_145, (128, ), (1, ))
assert_size_stride(primals_146, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_147, (128, ), (1, ))
assert_size_stride(primals_148, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_149, (128, ), (1, ))
assert_size_stride(primals_150, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_151, (128, ), (1, ))
assert_size_stride(primals_152, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_153, (128, ), (1, ))
assert_size_stride(primals_154, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_155, (128, ), (1, ))
assert_size_stride(primals_156, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_157, (19, ), (1, ))
assert_size_stride(primals_158, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_159, (128, ), (1, ))
assert_size_stride(primals_160, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_161, (128, ), (1, ))
assert_size_stride(primals_162, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_163, (128, ), (1, ))
assert_size_stride(primals_164, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_165, (128, ), (1, ))
assert_size_stride(primals_166, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_167, (128, ), (1, ))
assert_size_stride(primals_168, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_169, (128, ), (1, ))
assert_size_stride(primals_170, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_171, (38, ), (1, ))
assert_size_stride(primals_172, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_173, (128, ), (1, ))
assert_size_stride(primals_174, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_175, (128, ), (1, ))
assert_size_stride(primals_176, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_177, (128, ), (1, ))
assert_size_stride(primals_178, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_179, (128, ), (1, ))
assert_size_stride(primals_180, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_181, (128, ), (1, ))
assert_size_stride(primals_182, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_183, (128, ), (1, ))
assert_size_stride(primals_184, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_185, (19, ), (1, ))
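    # Annotation: the asserts above pin down the parameter layout. The 3x3
    # weights up to primals_31 form the VGG-style backbone that tapers the
    # features to 128 channels; primals_32..primals_45 are the two first-stage
    # branches with 1x1 heads emitting 38 and 19 channels; every later stage
    # repeats one shape pattern: a 7x7 conv over 185 = 38 + 19 + 128 input
    # channels, four more 7x7 convs, a 1x1 bottleneck, and a 1x1 head.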
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [input_6, input_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf7, primals_7, 524288, grid=grid(524288), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [input_8], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [input_8, input_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf9, primals_9, 524288, grid=grid(524288), stream=stream0)
del primals_9
buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf9, buf10, buf11, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf13, primals_11, 262144, grid=grid(262144), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [input_13, input_14], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf15, primals_13, 262144, grid=grid(262144), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [input_15], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [input_15, input_16], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf17, primals_15, 262144, grid=grid(262144), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [input_17, input_18], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf19, primals_17, 262144, grid=grid(262144), stream=stream0)
del primals_17
buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf19, buf20, buf21, 65536, grid=grid(65536), stream=stream0)
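        # Annotation: after this third 2x2 max-pool the feature maps are 8x8;
        # every remaining convolution in the graph runs at this resolution.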
# Topologically Sorted Source Nodes: [input_20], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [input_20, input_21], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf23, primals_19, 131072, grid=grid(131072), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [input_22], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [input_22, input_23], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf25, primals_21, 131072, grid=grid(131072), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [input_24], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 8, 8), (16384, 64, 8, 1))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [input_24, input_25], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf27, primals_23, 65536, grid=grid(65536), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [input_26], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1))
buf29 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [input_26, input_27], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf29, primals_25, 32768, grid=grid(32768), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [input_28], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 128, 8, 8), (8192, 64, 8, 1))
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [input_28, input_29], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf31, primals_27, 32768, grid=grid(32768), stream=stream0)
del primals_27
# Topologically Sorted Source Nodes: [input_30], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf31, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 128, 8, 8), (8192, 64, 8, 1))
buf33 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [input_30, input_31], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf33, primals_29, 32768, grid=grid(32768), stream=stream0)
del primals_29
# Topologically Sorted Source Nodes: [input_32], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf33, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 128, 8, 8), (8192, 64, 8, 1))
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [input_32, input_33], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf35, primals_31, 32768, grid=grid(32768), stream=stream0)
del primals_31
# Topologically Sorted Source Nodes: [input_34], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf35, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 64, 8, 1))
buf37 = buf36; del buf36 # reuse
# Topologically Sorted Source Nodes: [input_34, input_35], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf37, primals_33, 131072, grid=grid(131072), stream=stream0)
del primals_33
# Topologically Sorted Source Nodes: [input_36], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 38, 8, 8), (2432, 64, 8, 1))
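        # Annotation: the bias of this 38-channel head (primals_35) is not
        # applied here; it is deferred into triton_poi_fused_cat_9 below, which
        # folds both heads' bias adds into the concatenation.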
# Topologically Sorted Source Nodes: [input_37], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf29, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 128, 8, 8), (8192, 64, 8, 1))
buf40 = buf39; del buf39 # reuse
# Topologically Sorted Source Nodes: [input_37, input_38], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf40, primals_37, 32768, grid=grid(32768), stream=stream0)
del primals_37
# Topologically Sorted Source Nodes: [input_39], Original ATen: [aten.convolution]
buf41 = extern_kernels.convolution(buf40, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 128, 8, 8), (8192, 64, 8, 1))
buf42 = buf41; del buf41 # reuse
# Topologically Sorted Source Nodes: [input_39, input_40], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf42, primals_39, 32768, grid=grid(32768), stream=stream0)
del primals_39
# Topologically Sorted Source Nodes: [input_41], Original ATen: [aten.convolution]
buf43 = extern_kernels.convolution(buf42, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1))
buf44 = buf43; del buf43 # reuse
# Topologically Sorted Source Nodes: [input_41, input_42], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf44, primals_41, 32768, grid=grid(32768), stream=stream0)
del primals_41
# Topologically Sorted Source Nodes: [input_43], Original ATen: [aten.convolution]
buf45 = extern_kernels.convolution(buf44, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 512, 8, 8), (32768, 64, 8, 1))
buf46 = buf45; del buf45 # reuse
# Topologically Sorted Source Nodes: [input_43, input_44], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf46, primals_43, 131072, grid=grid(131072), stream=stream0)
del primals_43
# Topologically Sorted Source Nodes: [input_45], Original ATen: [aten.convolution]
buf47 = extern_kernels.convolution(buf46, primals_44, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 19, 8, 8), (1216, 64, 8, 1))
buf48 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf38, primals_35, buf47, primals_45, buf29, buf48, 47360, grid=grid(47360), stream=stream0)
del buf38
del buf47
del primals_35
del primals_45
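        # Annotation: buf48 is the next stage's 185-channel input: the two head
        # outputs (38 + 19 channels) stacked on the 128-channel features in
        # buf29. The same three-way concatenation repeats for each later stage.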
# Topologically Sorted Source Nodes: [input_46], Original ATen: [aten.convolution]
buf49 = extern_kernels.convolution(buf48, primals_46, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 128, 8, 8), (8192, 64, 8, 1))
buf50 = buf49; del buf49 # reuse
# Topologically Sorted Source Nodes: [input_46, input_47], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf50, primals_47, 32768, grid=grid(32768), stream=stream0)
del primals_47
# Topologically Sorted Source Nodes: [input_48], Original ATen: [aten.convolution]
buf51 = extern_kernels.convolution(buf50, primals_48, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 128, 8, 8), (8192, 64, 8, 1))
buf52 = buf51; del buf51 # reuse
# Topologically Sorted Source Nodes: [input_48, input_49], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf52, primals_49, 32768, grid=grid(32768), stream=stream0)
del primals_49
# Topologically Sorted Source Nodes: [input_50], Original ATen: [aten.convolution]
buf53 = extern_kernels.convolution(buf52, primals_50, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 128, 8, 8), (8192, 64, 8, 1))
buf54 = buf53; del buf53 # reuse
# Topologically Sorted Source Nodes: [input_50, input_51], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf54, primals_51, 32768, grid=grid(32768), stream=stream0)
del primals_51
# Topologically Sorted Source Nodes: [input_52], Original ATen: [aten.convolution]
buf55 = extern_kernels.convolution(buf54, primals_52, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 128, 8, 8), (8192, 64, 8, 1))
buf56 = buf55; del buf55 # reuse
# Topologically Sorted Source Nodes: [input_52, input_53], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf56, primals_53, 32768, grid=grid(32768), stream=stream0)
del primals_53
# Topologically Sorted Source Nodes: [input_54], Original ATen: [aten.convolution]
buf57 = extern_kernels.convolution(buf56, primals_54, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 128, 8, 8), (8192, 64, 8, 1))
buf58 = buf57; del buf57 # reuse
# Topologically Sorted Source Nodes: [input_54, input_55], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf58, primals_55, 32768, grid=grid(32768), stream=stream0)
del primals_55
# Topologically Sorted Source Nodes: [input_56], Original ATen: [aten.convolution]
buf59 = extern_kernels.convolution(buf58, primals_56, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 128, 8, 8), (8192, 64, 8, 1))
buf60 = buf59; del buf59 # reuse
# Topologically Sorted Source Nodes: [input_56, input_57], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf60, primals_57, 32768, grid=grid(32768), stream=stream0)
del primals_57
# Topologically Sorted Source Nodes: [input_58], Original ATen: [aten.convolution]
buf61 = extern_kernels.convolution(buf60, primals_58, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 38, 8, 8), (2432, 64, 8, 1))
# Topologically Sorted Source Nodes: [input_59], Original ATen: [aten.convolution]
buf62 = extern_kernels.convolution(buf48, primals_60, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf62, (4, 128, 8, 8), (8192, 64, 8, 1))
buf63 = buf62; del buf62 # reuse
# Topologically Sorted Source Nodes: [input_59, input_60], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf63, primals_61, 32768, grid=grid(32768), stream=stream0)
del primals_61
# Topologically Sorted Source Nodes: [input_61], Original ATen: [aten.convolution]
buf64 = extern_kernels.convolution(buf63, primals_62, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 128, 8, 8), (8192, 64, 8, 1))
buf65 = buf64; del buf64 # reuse
# Topologically Sorted Source Nodes: [input_61, input_62], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf65, primals_63, 32768, grid=grid(32768), stream=stream0)
del primals_63
# Topologically Sorted Source Nodes: [input_63], Original ATen: [aten.convolution]
buf66 = extern_kernels.convolution(buf65, primals_64, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 128, 8, 8), (8192, 64, 8, 1))
buf67 = buf66; del buf66 # reuse
# Topologically Sorted Source Nodes: [input_63, input_64], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf67, primals_65, 32768, grid=grid(32768), stream=stream0)
del primals_65
# Topologically Sorted Source Nodes: [input_65], Original ATen: [aten.convolution]
buf68 = extern_kernels.convolution(buf67, primals_66, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 128, 8, 8), (8192, 64, 8, 1))
buf69 = buf68; del buf68 # reuse
# Topologically Sorted Source Nodes: [input_65, input_66], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf69, primals_67, 32768, grid=grid(32768), stream=stream0)
del primals_67
# Topologically Sorted Source Nodes: [input_67], Original ATen: [aten.convolution]
buf70 = extern_kernels.convolution(buf69, primals_68, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 128, 8, 8), (8192, 64, 8, 1))
buf71 = buf70; del buf70 # reuse
# Topologically Sorted Source Nodes: [input_67, input_68], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf71, primals_69, 32768, grid=grid(32768), stream=stream0)
del primals_69
# Topologically Sorted Source Nodes: [input_69], Original ATen: [aten.convolution]
buf72 = extern_kernels.convolution(buf71, primals_70, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf72, (4, 128, 8, 8), (8192, 64, 8, 1))
buf73 = buf72; del buf72 # reuse
# Topologically Sorted Source Nodes: [input_69, input_70], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf73, primals_71, 32768, grid=grid(32768), stream=stream0)
del primals_71
# Topologically Sorted Source Nodes: [input_71], Original ATen: [aten.convolution]
buf74 = extern_kernels.convolution(buf73, primals_72, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf74, (4, 19, 8, 8), (1216, 64, 8, 1))
buf75 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out3], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf61, primals_59, buf74, primals_73, buf29, buf75, 47360, grid=grid(47360), stream=stream0)
del buf61
del buf74
del primals_59
del primals_73
# Topologically Sorted Source Nodes: [input_72], Original ATen: [aten.convolution]
buf76 = extern_kernels.convolution(buf75, primals_74, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf76, (4, 128, 8, 8), (8192, 64, 8, 1))
buf77 = buf76; del buf76 # reuse
# Topologically Sorted Source Nodes: [input_72, input_73], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf77, primals_75, 32768, grid=grid(32768), stream=stream0)
del primals_75
# Topologically Sorted Source Nodes: [input_74], Original ATen: [aten.convolution]
buf78 = extern_kernels.convolution(buf77, primals_76, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf78, (4, 128, 8, 8), (8192, 64, 8, 1))
buf79 = buf78; del buf78 # reuse
# Topologically Sorted Source Nodes: [input_74, input_75], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf79, primals_77, 32768, grid=grid(32768), stream=stream0)
del primals_77
# Topologically Sorted Source Nodes: [input_76], Original ATen: [aten.convolution]
buf80 = extern_kernels.convolution(buf79, primals_78, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf80, (4, 128, 8, 8), (8192, 64, 8, 1))
buf81 = buf80; del buf80 # reuse
# Topologically Sorted Source Nodes: [input_76, input_77], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf81, primals_79, 32768, grid=grid(32768), stream=stream0)
del primals_79
# Topologically Sorted Source Nodes: [input_78], Original ATen: [aten.convolution]
buf82 = extern_kernels.convolution(buf81, primals_80, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf82, (4, 128, 8, 8), (8192, 64, 8, 1))
buf83 = buf82; del buf82 # reuse
# Topologically Sorted Source Nodes: [input_78, input_79], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf83, primals_81, 32768, grid=grid(32768), stream=stream0)
del primals_81
# Topologically Sorted Source Nodes: [input_80], Original ATen: [aten.convolution]
buf84 = extern_kernels.convolution(buf83, primals_82, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 128, 8, 8), (8192, 64, 8, 1))
buf85 = buf84; del buf84 # reuse
# Topologically Sorted Source Nodes: [input_80, input_81], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf85, primals_83, 32768, grid=grid(32768), stream=stream0)
del primals_83
# Topologically Sorted Source Nodes: [input_82], Original ATen: [aten.convolution]
buf86 = extern_kernels.convolution(buf85, primals_84, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 128, 8, 8), (8192, 64, 8, 1))
buf87 = buf86; del buf86 # reuse
# Topologically Sorted Source Nodes: [input_82, input_83], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf87, primals_85, 32768, grid=grid(32768), stream=stream0)
del primals_85
# Topologically Sorted Source Nodes: [input_84], Original ATen: [aten.convolution]
buf88 = extern_kernels.convolution(buf87, primals_86, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf88, (4, 38, 8, 8), (2432, 64, 8, 1))
# Topologically Sorted Source Nodes: [input_85], Original ATen: [aten.convolution]
buf89 = extern_kernels.convolution(buf75, primals_88, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf89, (4, 128, 8, 8), (8192, 64, 8, 1))
buf90 = buf89; del buf89 # reuse
# Topologically Sorted Source Nodes: [input_85, input_86], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf90, primals_89, 32768, grid=grid(32768), stream=stream0)
del primals_89
# Topologically Sorted Source Nodes: [input_87], Original ATen: [aten.convolution]
buf91 = extern_kernels.convolution(buf90, primals_90, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf91, (4, 128, 8, 8), (8192, 64, 8, 1))
buf92 = buf91; del buf91 # reuse
# Topologically Sorted Source Nodes: [input_87, input_88], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf92, primals_91, 32768, grid=grid(32768), stream=stream0)
del primals_91
# Topologically Sorted Source Nodes: [input_89], Original ATen: [aten.convolution]
buf93 = extern_kernels.convolution(buf92, primals_92, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf93, (4, 128, 8, 8), (8192, 64, 8, 1))
buf94 = buf93; del buf93 # reuse
# Topologically Sorted Source Nodes: [input_89, input_90], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf94, primals_93, 32768, grid=grid(32768), stream=stream0)
del primals_93
# Topologically Sorted Source Nodes: [input_91], Original ATen: [aten.convolution]
buf95 = extern_kernels.convolution(buf94, primals_94, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf95, (4, 128, 8, 8), (8192, 64, 8, 1))
buf96 = buf95; del buf95 # reuse
# Topologically Sorted Source Nodes: [input_91, input_92], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf96, primals_95, 32768, grid=grid(32768), stream=stream0)
del primals_95
# Topologically Sorted Source Nodes: [input_93], Original ATen: [aten.convolution]
buf97 = extern_kernels.convolution(buf96, primals_96, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf97, (4, 128, 8, 8), (8192, 64, 8, 1))
buf98 = buf97; del buf97 # reuse
# Topologically Sorted Source Nodes: [input_93, input_94], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf98, primals_97, 32768, grid=grid(32768), stream=stream0)
del primals_97
# Topologically Sorted Source Nodes: [input_95], Original ATen: [aten.convolution]
buf99 = extern_kernels.convolution(buf98, primals_98, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 128, 8, 8), (8192, 64, 8, 1))
buf100 = buf99; del buf99 # reuse
# Topologically Sorted Source Nodes: [input_95, input_96], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf100, primals_99, 32768, grid=grid(32768), stream=stream0)
del primals_99
# Topologically Sorted Source Nodes: [input_97], Original ATen: [aten.convolution]
buf101 = extern_kernels.convolution(buf100, primals_100, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 19, 8, 8), (1216, 64, 8, 1))
buf102 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out4], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf88, primals_87, buf101, primals_101, buf29, buf102, 47360, grid=grid(47360), stream=stream0)
del buf101
del buf88
del primals_101
del primals_87
# Topologically Sorted Source Nodes: [input_98], Original ATen: [aten.convolution]
buf103 = extern_kernels.convolution(buf102, primals_102, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf103, (4, 128, 8, 8), (8192, 64, 8, 1))
buf104 = buf103; del buf103 # reuse
# Topologically Sorted Source Nodes: [input_98, input_99], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf104, primals_103, 32768, grid=grid(32768), stream=stream0)
del primals_103
# Topologically Sorted Source Nodes: [input_100], Original ATen: [aten.convolution]
buf105 = extern_kernels.convolution(buf104, primals_104, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf105, (4, 128, 8, 8), (8192, 64, 8, 1))
buf106 = buf105; del buf105 # reuse
# Topologically Sorted Source Nodes: [input_100, input_101], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf106, primals_105, 32768, grid=grid(32768), stream=stream0)
del primals_105
# Topologically Sorted Source Nodes: [input_102], Original ATen: [aten.convolution]
buf107 = extern_kernels.convolution(buf106, primals_106, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf107, (4, 128, 8, 8), (8192, 64, 8, 1))
buf108 = buf107; del buf107 # reuse
# Topologically Sorted Source Nodes: [input_102, input_103], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf108, primals_107, 32768, grid=grid(32768), stream=stream0)
del primals_107
# Topologically Sorted Source Nodes: [input_104], Original ATen: [aten.convolution]
buf109 = extern_kernels.convolution(buf108, primals_108, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 128, 8, 8), (8192, 64, 8, 1))
buf110 = buf109; del buf109 # reuse
# Topologically Sorted Source Nodes: [input_104, input_105], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf110, primals_109, 32768, grid=grid(32768), stream=stream0)
del primals_109
# Topologically Sorted Source Nodes: [input_106], Original ATen: [aten.convolution]
buf111 = extern_kernels.convolution(buf110, primals_110, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf111, (4, 128, 8, 8), (8192, 64, 8, 1))
buf112 = buf111; del buf111 # reuse
# Topologically Sorted Source Nodes: [input_106, input_107], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf112, primals_111, 32768, grid=grid(32768), stream=stream0)
del primals_111
# Topologically Sorted Source Nodes: [input_108], Original ATen: [aten.convolution]
buf113 = extern_kernels.convolution(buf112, primals_112, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf113, (4, 128, 8, 8), (8192, 64, 8, 1))
buf114 = buf113; del buf113 # reuse
# Topologically Sorted Source Nodes: [input_108, input_109], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf114, primals_113, 32768, grid=grid(32768), stream=stream0)
del primals_113
# Topologically Sorted Source Nodes: [input_110], Original ATen: [aten.convolution]
buf115 = extern_kernels.convolution(buf114, primals_114, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 38, 8, 8), (2432, 64, 8, 1))
# Topologically Sorted Source Nodes: [input_111], Original ATen: [aten.convolution]
buf116 = extern_kernels.convolution(buf102, primals_116, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf116, (4, 128, 8, 8), (8192, 64, 8, 1))
buf117 = buf116; del buf116 # reuse
# Topologically Sorted Source Nodes: [input_111, input_112], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf117, primals_117, 32768, grid=grid(32768), stream=stream0)
del primals_117
# Topologically Sorted Source Nodes: [input_113], Original ATen: [aten.convolution]
buf118 = extern_kernels.convolution(buf117, primals_118, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 128, 8, 8), (8192, 64, 8, 1))
buf119 = buf118; del buf118 # reuse
# Topologically Sorted Source Nodes: [input_113, input_114], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf119, primals_119, 32768, grid=grid(32768), stream=stream0)
del primals_119
# Topologically Sorted Source Nodes: [input_115], Original ATen: [aten.convolution]
buf120 = extern_kernels.convolution(buf119, primals_120, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf120, (4, 128, 8, 8), (8192, 64, 8, 1))
buf121 = buf120; del buf120 # reuse
# Topologically Sorted Source Nodes: [input_115, input_116], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf121, primals_121, 32768, grid=grid(32768), stream=stream0)
del primals_121
# Topologically Sorted Source Nodes: [input_117], Original ATen: [aten.convolution]
buf122 = extern_kernels.convolution(buf121, primals_122, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf122, (4, 128, 8, 8), (8192, 64, 8, 1))
buf123 = buf122; del buf122 # reuse
# Topologically Sorted Source Nodes: [input_117, input_118], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf123, primals_123, 32768, grid=grid(32768), stream=stream0)
del primals_123
# Topologically Sorted Source Nodes: [input_119], Original ATen: [aten.convolution]
buf124 = extern_kernels.convolution(buf123, primals_124, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf124, (4, 128, 8, 8), (8192, 64, 8, 1))
buf125 = buf124; del buf124 # reuse
# Topologically Sorted Source Nodes: [input_119, input_120], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf125, primals_125, 32768, grid=grid(32768), stream=stream0)
del primals_125
# Topologically Sorted Source Nodes: [input_121], Original ATen: [aten.convolution]
buf126 = extern_kernels.convolution(buf125, primals_126, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf126, (4, 128, 8, 8), (8192, 64, 8, 1))
buf127 = buf126; del buf126 # reuse
# Topologically Sorted Source Nodes: [input_121, input_122], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf127, primals_127, 32768, grid=grid(32768), stream=stream0)
del primals_127
# Topologically Sorted Source Nodes: [input_123], Original ATen: [aten.convolution]
buf128 = extern_kernels.convolution(buf127, primals_128, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf128, (4, 19, 8, 8), (1216, 64, 8, 1))
buf129 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out5], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf115, primals_115, buf128, primals_129, buf29, buf129, 47360, grid=grid(47360), stream=stream0)
del buf115
del buf128
del primals_115
del primals_129
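    # buf129 holds out5 = cat([out4_1 + bias, out4_2 + bias, out1], dim=1).
    # The two conv towers below (primals_130..143 and primals_144..157) are
    # stage 5's L1 and L2 branches; their outputs are concatenated with the
    # shared backbone features (buf29) again to form out6 in buf156.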
# Topologically Sorted Source Nodes: [input_124], Original ATen: [aten.convolution]
buf130 = extern_kernels.convolution(buf129, primals_130, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf130, (4, 128, 8, 8), (8192, 64, 8, 1))
buf131 = buf130; del buf130 # reuse
# Topologically Sorted Source Nodes: [input_124, input_125], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf131, primals_131, 32768, grid=grid(32768), stream=stream0)
del primals_131
# Topologically Sorted Source Nodes: [input_126], Original ATen: [aten.convolution]
buf132 = extern_kernels.convolution(buf131, primals_132, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf132, (4, 128, 8, 8), (8192, 64, 8, 1))
buf133 = buf132; del buf132 # reuse
# Topologically Sorted Source Nodes: [input_126, input_127], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf133, primals_133, 32768, grid=grid(32768), stream=stream0)
del primals_133
# Topologically Sorted Source Nodes: [input_128], Original ATen: [aten.convolution]
buf134 = extern_kernels.convolution(buf133, primals_134, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf134, (4, 128, 8, 8), (8192, 64, 8, 1))
buf135 = buf134; del buf134 # reuse
# Topologically Sorted Source Nodes: [input_128, input_129], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf135, primals_135, 32768, grid=grid(32768), stream=stream0)
del primals_135
# Topologically Sorted Source Nodes: [input_130], Original ATen: [aten.convolution]
buf136 = extern_kernels.convolution(buf135, primals_136, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf136, (4, 128, 8, 8), (8192, 64, 8, 1))
buf137 = buf136; del buf136 # reuse
# Topologically Sorted Source Nodes: [input_130, input_131], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf137, primals_137, 32768, grid=grid(32768), stream=stream0)
del primals_137
# Topologically Sorted Source Nodes: [input_132], Original ATen: [aten.convolution]
buf138 = extern_kernels.convolution(buf137, primals_138, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf138, (4, 128, 8, 8), (8192, 64, 8, 1))
buf139 = buf138; del buf138 # reuse
# Topologically Sorted Source Nodes: [input_132, input_133], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf139, primals_139, 32768, grid=grid(32768), stream=stream0)
del primals_139
# Topologically Sorted Source Nodes: [input_134], Original ATen: [aten.convolution]
buf140 = extern_kernels.convolution(buf139, primals_140, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf140, (4, 128, 8, 8), (8192, 64, 8, 1))
buf141 = buf140; del buf140 # reuse
# Topologically Sorted Source Nodes: [input_134, input_135], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf141, primals_141, 32768, grid=grid(32768), stream=stream0)
del primals_141
# Topologically Sorted Source Nodes: [input_136], Original ATen: [aten.convolution]
buf142 = extern_kernels.convolution(buf141, primals_142, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf142, (4, 38, 8, 8), (2432, 64, 8, 1))
# Topologically Sorted Source Nodes: [input_137], Original ATen: [aten.convolution]
buf143 = extern_kernels.convolution(buf129, primals_144, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf143, (4, 128, 8, 8), (8192, 64, 8, 1))
buf144 = buf143; del buf143 # reuse
# Topologically Sorted Source Nodes: [input_137, input_138], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf144, primals_145, 32768, grid=grid(32768), stream=stream0)
del primals_145
# Topologically Sorted Source Nodes: [input_139], Original ATen: [aten.convolution]
buf145 = extern_kernels.convolution(buf144, primals_146, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf145, (4, 128, 8, 8), (8192, 64, 8, 1))
buf146 = buf145; del buf145 # reuse
# Topologically Sorted Source Nodes: [input_139, input_140], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf146, primals_147, 32768, grid=grid(32768), stream=stream0)
del primals_147
# Topologically Sorted Source Nodes: [input_141], Original ATen: [aten.convolution]
buf147 = extern_kernels.convolution(buf146, primals_148, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf147, (4, 128, 8, 8), (8192, 64, 8, 1))
buf148 = buf147; del buf147 # reuse
# Topologically Sorted Source Nodes: [input_141, input_142], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf148, primals_149, 32768, grid=grid(32768), stream=stream0)
del primals_149
# Topologically Sorted Source Nodes: [input_143], Original ATen: [aten.convolution]
buf149 = extern_kernels.convolution(buf148, primals_150, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf149, (4, 128, 8, 8), (8192, 64, 8, 1))
buf150 = buf149; del buf149 # reuse
# Topologically Sorted Source Nodes: [input_143, input_144], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf150, primals_151, 32768, grid=grid(32768), stream=stream0)
del primals_151
# Topologically Sorted Source Nodes: [input_145], Original ATen: [aten.convolution]
buf151 = extern_kernels.convolution(buf150, primals_152, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf151, (4, 128, 8, 8), (8192, 64, 8, 1))
buf152 = buf151; del buf151 # reuse
# Topologically Sorted Source Nodes: [input_145, input_146], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf152, primals_153, 32768, grid=grid(32768), stream=stream0)
del primals_153
# Topologically Sorted Source Nodes: [input_147], Original ATen: [aten.convolution]
buf153 = extern_kernels.convolution(buf152, primals_154, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf153, (4, 128, 8, 8), (8192, 64, 8, 1))
buf154 = buf153; del buf153 # reuse
# Topologically Sorted Source Nodes: [input_147, input_148], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf154, primals_155, 32768, grid=grid(32768), stream=stream0)
del primals_155
# Topologically Sorted Source Nodes: [input_149], Original ATen: [aten.convolution]
buf155 = extern_kernels.convolution(buf154, primals_156, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf155, (4, 19, 8, 8), (1216, 64, 8, 1))
buf156 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out6], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf142, primals_143, buf155, primals_157, buf29, buf156, 47360, grid=grid(47360), stream=stream0)
del buf142
del buf155
del primals_143
del primals_157
# Topologically Sorted Source Nodes: [input_150], Original ATen: [aten.convolution]
buf157 = extern_kernels.convolution(buf156, primals_158, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf157, (4, 128, 8, 8), (8192, 64, 8, 1))
buf158 = buf157; del buf157 # reuse
# Topologically Sorted Source Nodes: [input_150, input_151], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf158, primals_159, 32768, grid=grid(32768), stream=stream0)
del primals_159
# Topologically Sorted Source Nodes: [input_152], Original ATen: [aten.convolution]
buf159 = extern_kernels.convolution(buf158, primals_160, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf159, (4, 128, 8, 8), (8192, 64, 8, 1))
buf160 = buf159; del buf159 # reuse
# Topologically Sorted Source Nodes: [input_152, input_153], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf160, primals_161, 32768, grid=grid(32768), stream=stream0)
del primals_161
# Topologically Sorted Source Nodes: [input_154], Original ATen: [aten.convolution]
buf161 = extern_kernels.convolution(buf160, primals_162, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf161, (4, 128, 8, 8), (8192, 64, 8, 1))
buf162 = buf161; del buf161 # reuse
# Topologically Sorted Source Nodes: [input_154, input_155], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf162, primals_163, 32768, grid=grid(32768), stream=stream0)
del primals_163
# Topologically Sorted Source Nodes: [input_156], Original ATen: [aten.convolution]
buf163 = extern_kernels.convolution(buf162, primals_164, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf163, (4, 128, 8, 8), (8192, 64, 8, 1))
buf164 = buf163; del buf163 # reuse
# Topologically Sorted Source Nodes: [input_156, input_157], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf164, primals_165, 32768, grid=grid(32768), stream=stream0)
del primals_165
# Topologically Sorted Source Nodes: [input_158], Original ATen: [aten.convolution]
buf165 = extern_kernels.convolution(buf164, primals_166, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 128, 8, 8), (8192, 64, 8, 1))
buf166 = buf165; del buf165 # reuse
# Topologically Sorted Source Nodes: [input_158, input_159], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf166, primals_167, 32768, grid=grid(32768), stream=stream0)
del primals_167
# Topologically Sorted Source Nodes: [input_160], Original ATen: [aten.convolution]
buf167 = extern_kernels.convolution(buf166, primals_168, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf167, (4, 128, 8, 8), (8192, 64, 8, 1))
buf168 = buf167; del buf167 # reuse
# Topologically Sorted Source Nodes: [input_160, input_161], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf168, primals_169, 32768, grid=grid(32768), stream=stream0)
del primals_169
# Topologically Sorted Source Nodes: [input_162], Original ATen: [aten.convolution]
buf169 = extern_kernels.convolution(buf168, primals_170, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf169, (4, 38, 8, 8), (2432, 64, 8, 1))
buf170 = buf169; del buf169 # reuse
# Topologically Sorted Source Nodes: [input_162], Original ATen: [aten.convolution]
triton_poi_fused_convolution_10.run(buf170, primals_171, 9728, grid=grid(9728), stream=stream0)
del primals_171
# Topologically Sorted Source Nodes: [input_163], Original ATen: [aten.convolution]
buf171 = extern_kernels.convolution(buf156, primals_172, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf171, (4, 128, 8, 8), (8192, 64, 8, 1))
buf172 = buf171; del buf171 # reuse
# Topologically Sorted Source Nodes: [input_163, input_164], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf172, primals_173, 32768, grid=grid(32768), stream=stream0)
del primals_173
# Topologically Sorted Source Nodes: [input_165], Original ATen: [aten.convolution]
buf173 = extern_kernels.convolution(buf172, primals_174, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf173, (4, 128, 8, 8), (8192, 64, 8, 1))
buf174 = buf173; del buf173 # reuse
# Topologically Sorted Source Nodes: [input_165, input_166], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf174, primals_175, 32768, grid=grid(32768), stream=stream0)
del primals_175
# Topologically Sorted Source Nodes: [input_167], Original ATen: [aten.convolution]
buf175 = extern_kernels.convolution(buf174, primals_176, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf175, (4, 128, 8, 8), (8192, 64, 8, 1))
buf176 = buf175; del buf175 # reuse
# Topologically Sorted Source Nodes: [input_167, input_168], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf176, primals_177, 32768, grid=grid(32768), stream=stream0)
del primals_177
# Topologically Sorted Source Nodes: [input_169], Original ATen: [aten.convolution]
buf177 = extern_kernels.convolution(buf176, primals_178, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf177, (4, 128, 8, 8), (8192, 64, 8, 1))
buf178 = buf177; del buf177 # reuse
# Topologically Sorted Source Nodes: [input_169, input_170], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf178, primals_179, 32768, grid=grid(32768), stream=stream0)
del primals_179
# Topologically Sorted Source Nodes: [input_171], Original ATen: [aten.convolution]
buf179 = extern_kernels.convolution(buf178, primals_180, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf179, (4, 128, 8, 8), (8192, 64, 8, 1))
buf180 = buf179; del buf179 # reuse
# Topologically Sorted Source Nodes: [input_171, input_172], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf180, primals_181, 32768, grid=grid(32768), stream=stream0)
del primals_181
# Topologically Sorted Source Nodes: [input_173], Original ATen: [aten.convolution]
buf181 = extern_kernels.convolution(buf180, primals_182, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf181, (4, 128, 8, 8), (8192, 64, 8, 1))
buf182 = buf181; del buf181 # reuse
# Topologically Sorted Source Nodes: [input_173, input_174], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf182, primals_183, 32768, grid=grid(32768), stream=stream0)
del primals_183
# Topologically Sorted Source Nodes: [input_175], Original ATen: [aten.convolution]
buf183 = extern_kernels.convolution(buf182, primals_184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf183, (4, 19, 8, 8), (1216, 64, 8, 1))
buf184 = buf183; del buf183 # reuse
buf185 = empty_strided_cuda((4, 19, 8, 8), (1280, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [input_175, input_176], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_11.run(buf184, primals_185, buf185, 4864, grid=grid(4864), stream=stream0)
del primals_185
return (buf170, buf184, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_50, primals_52, primals_54, primals_56, primals_58, primals_60, primals_62, primals_64, primals_66, primals_68, primals_70, primals_72, primals_74, primals_76, primals_78, primals_80, primals_82, primals_84, primals_86, primals_88, primals_90, primals_92, primals_94, primals_96, primals_98, primals_100, primals_102, primals_104, primals_106, primals_108, primals_110, primals_112, primals_114, primals_116, primals_118, primals_120, primals_122, primals_124, primals_126, primals_128, primals_130, primals_132, primals_134, primals_136, primals_138, primals_140, primals_142, primals_144, primals_146, primals_148, primals_150, primals_152, primals_154, primals_156, primals_158, primals_160, primals_162, primals_164, primals_166, primals_168, primals_170, primals_172, primals_174, primals_176, primals_178, primals_180, primals_182, primals_184, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19, buf20, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf40, buf42, buf44, buf46, buf48, buf50, buf52, buf54, buf56, buf58, buf60, buf63, buf65, buf67, buf69, buf71, buf73, buf75, buf77, buf79, buf81, buf83, buf85, buf87, buf90, buf92, buf94, buf96, buf98, buf100, buf102, buf104, buf106, buf108, buf110, buf112, buf114, buf117, buf119, buf121, buf123, buf125, buf127, buf129, buf131, buf133, buf135, buf137, buf139, buf141, buf144, buf146, buf148, buf150, buf152, buf154, buf156, buf158, buf160, buf162, buf164, buf166, buf168, buf172, buf174, buf176, buf178, buf180, buf182, buf185, )
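# The first two tensors returned by call() are the forward outputs (buf170:
# the 38-channel PAF map out6_1, buf184: the 19-channel heatmap out6_2); the
# remaining entries are weights and intermediate activations (plus the buf185
# ReLU mask) that the backward pass reuses.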
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((512, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((38, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((512, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((19, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_59 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_60 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_61 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_62 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_63 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_64 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_65 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_66 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_67 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_68 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_69 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_70 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_71 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_72 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_73 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_74 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_75 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_76 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_77 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_78 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_79 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_80 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_81 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_82 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_83 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_84 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_85 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_86 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_87 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_88 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_89 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_90 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_91 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_92 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_93 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_94 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_95 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_96 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_97 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_98 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_99 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_100 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_101 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_102 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_103 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_104 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_105 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_106 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_107 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_108 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_109 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_110 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_111 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_112 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_113 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_114 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_115 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_116 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_117 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_118 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_119 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_120 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_121 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_122 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_123 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_124 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_125 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_126 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_127 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_128 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_129 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_130 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_131 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_132 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_133 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_134 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_135 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_136 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_137 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_138 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_139 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_140 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_141 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_142 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_143 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_144 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_145 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_146 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_147 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_148 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_149 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_150 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_151 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_152 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_153 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_154 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_155 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_156 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_157 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_158 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_159 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_160 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_161 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_162 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_163 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_164 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_165 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_166 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_167 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_168 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_169 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_170 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_171 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_172 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_173 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_174 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_175 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_176 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_177 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_178 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_179 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_180 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_181 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_182 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_183 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_184 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_185 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
from collections import OrderedDict
def _make_layers(block, no_relu_layers):
layers = []
for layer_name, v in block.items():
if 'pool' in layer_name:
layer = torch.nn.MaxPool2d(kernel_size=v[0], stride=v[1],
padding=v[2])
layers.append((layer_name, layer))
else:
conv2d = torch.nn.Conv2d(in_channels=v[0], out_channels=v[1],
kernel_size=v[2], stride=v[3], padding=v[4])
layers.append((layer_name, conv2d))
            if layer_name not in no_relu_layers:
                layers.append(('relu_' + layer_name,
                               torch.nn.ReLU(inplace=True)))
return torch.nn.Sequential(OrderedDict(layers))
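# Illustrative sketch of what _make_layers builds (using the first two
# block0 entries below): conv entries are [in_ch, out_ch, kernel, stride,
# pad], pool entries are [kernel, stride, pad], and every conv outside
# no_relu_layers is followed by an in-place ReLU.
#   _make_layers(OrderedDict({'conv1_1': [3, 64, 3, 1, 1],
#                             'pool1_stage1': [2, 2, 0]}), [])
#   -> Sequential(conv1_1: Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
#                 relu_conv1_1: ReLU(inplace=True),
#                 pool1_stage1: MaxPool2d(kernel_size=2, stride=2, padding=0))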
class BodyPoseModel(torch.nn.Module):
def __init__(self):
super(BodyPoseModel, self).__init__()
no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2',
'Mconv7_stage2_L1', 'Mconv7_stage2_L2', 'Mconv7_stage3_L1',
'Mconv7_stage3_L2', 'Mconv7_stage4_L1', 'Mconv7_stage4_L2',
'Mconv7_stage5_L1', 'Mconv7_stage5_L2', 'Mconv7_stage6_L1',
'Mconv7_stage6_L1']
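        # Note: 'Mconv7_stage6_L1' appears twice above where
        # 'Mconv7_stage6_L2' was presumably intended, so the stage-6 L2 head
        # keeps a trailing ReLU; the compiled kernels reproduce this (see
        # triton_poi_fused_convolution_relu_threshold_backward_11).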
blocks = {}
        block0 = OrderedDict({
            'conv1_1': [3, 64, 3, 1, 1],
            'conv1_2': [64, 64, 3, 1, 1],
            'pool1_stage1': [2, 2, 0],
            'conv2_1': [64, 128, 3, 1, 1],
            'conv2_2': [128, 128, 3, 1, 1],
            'pool2_stage1': [2, 2, 0],
            'conv3_1': [128, 256, 3, 1, 1],
            'conv3_2': [256, 256, 3, 1, 1],
            'conv3_3': [256, 256, 3, 1, 1],
            'conv3_4': [256, 256, 3, 1, 1],
            'pool3_stage1': [2, 2, 0],
            'conv4_1': [256, 512, 3, 1, 1],
            'conv4_2': [512, 512, 3, 1, 1],
            'conv4_3_CPM': [512, 256, 3, 1, 1],
            'conv4_4_CPM': [256, 128, 3, 1, 1]})
        block1_1 = OrderedDict({
            'conv5_1_CPM_L1': [128, 128, 3, 1, 1],
            'conv5_2_CPM_L1': [128, 128, 3, 1, 1],
            'conv5_3_CPM_L1': [128, 128, 3, 1, 1],
            'conv5_4_CPM_L1': [128, 512, 1, 1, 0],
            'conv5_5_CPM_L1': [512, 38, 1, 1, 0]})
        block1_2 = OrderedDict({
            'conv5_1_CPM_L2': [128, 128, 3, 1, 1],
            'conv5_2_CPM_L2': [128, 128, 3, 1, 1],
            'conv5_3_CPM_L2': [128, 128, 3, 1, 1],
            'conv5_4_CPM_L2': [128, 512, 1, 1, 0],
            'conv5_5_CPM_L2': [512, 19, 1, 1, 0]})
blocks['block1_1'] = block1_1
blocks['block1_2'] = block1_2
self.model0 = _make_layers(block0, no_relu_layers)
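        # Stages 2-6: each refinement stage consumes the 185-channel concat
        # (38 PAF + 19 heatmap + 128 backbone channels) and refines both
        # heads with five 7x7 convs followed by two 1x1 convs.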
for i in range(2, 7):
            blocks['block%d_1' % i] = OrderedDict({
                ('Mconv1_stage%d_L1' % i): [185, 128, 7, 1, 3],
                ('Mconv2_stage%d_L1' % i): [128, 128, 7, 1, 3],
                ('Mconv3_stage%d_L1' % i): [128, 128, 7, 1, 3],
                ('Mconv4_stage%d_L1' % i): [128, 128, 7, 1, 3],
                ('Mconv5_stage%d_L1' % i): [128, 128, 7, 1, 3],
                ('Mconv6_stage%d_L1' % i): [128, 128, 1, 1, 0],
                ('Mconv7_stage%d_L1' % i): [128, 38, 1, 1, 0]})
            blocks['block%d_2' % i] = OrderedDict({
                ('Mconv1_stage%d_L2' % i): [185, 128, 7, 1, 3],
                ('Mconv2_stage%d_L2' % i): [128, 128, 7, 1, 3],
                ('Mconv3_stage%d_L2' % i): [128, 128, 7, 1, 3],
                ('Mconv4_stage%d_L2' % i): [128, 128, 7, 1, 3],
                ('Mconv5_stage%d_L2' % i): [128, 128, 7, 1, 3],
                ('Mconv6_stage%d_L2' % i): [128, 128, 1, 1, 0],
                ('Mconv7_stage%d_L2' % i): [128, 19, 1, 1, 0]})
for k in blocks.keys():
blocks[k] = _make_layers(blocks[k], no_relu_layers)
self.model1_1 = blocks['block1_1']
self.model2_1 = blocks['block2_1']
self.model3_1 = blocks['block3_1']
self.model4_1 = blocks['block4_1']
self.model5_1 = blocks['block5_1']
self.model6_1 = blocks['block6_1']
self.model1_2 = blocks['block1_2']
self.model2_2 = blocks['block2_2']
self.model3_2 = blocks['block3_2']
self.model4_2 = blocks['block4_2']
self.model5_2 = blocks['block5_2']
self.model6_2 = blocks['block6_2']
def forward(self, x):
out1 = self.model0(x)
out1_1 = self.model1_1(out1)
out1_2 = self.model1_2(out1)
out2 = torch.cat([out1_1, out1_2, out1], 1)
out2_1 = self.model2_1(out2)
out2_2 = self.model2_2(out2)
out3 = torch.cat([out2_1, out2_2, out1], 1)
out3_1 = self.model3_1(out3)
out3_2 = self.model3_2(out3)
out4 = torch.cat([out3_1, out3_2, out1], 1)
out4_1 = self.model4_1(out4)
out4_2 = self.model4_2(out4)
out5 = torch.cat([out4_1, out4_2, out1], 1)
out5_1 = self.model5_1(out5)
out5_2 = self.model5_2(out5)
out6 = torch.cat([out5_1, out5_2, out1], 1)
out6_1 = self.model6_1(out6)
out6_2 = self.model6_2(out6)
return out6_1, out6_2
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
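# Minimal usage sketch (illustrative, not part of the generated artifact):
# exercising the eager module with the helper input above. The three 2x2
# pools shrink 64x64 inputs to 8x8, so out6_1 is a (4, 38, 8, 8) PAF tensor
# and out6_2 a (4, 19, 8, 8) keypoint-heatmap tensor.
if __name__ == '__main__':
    model = BodyPoseModel().eval()
    with torch.no_grad():
        pafs, heatmaps = model(*get_inputs())
    print(pafs.shape, heatmaps.shape)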
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
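# Fused conv-epilogue kernels: extern_kernels.convolution produces the raw
# conv output, then one of these pointwise kernels adds the per-channel bias
# and applies ReLU in place. Variants _0, _2, _4, _6, _7 and _8 are
# identical except for the spatial size and channel count baked into the
# index arithmetic that recovers the channel from the flat offset.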
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
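# 2x2/stride-2 max-pooling kernels (_1, _3 and _5 differ only in the output
# width baked into the index math). Each reads one 2x2 input window, storing
# the window max in out_ptr0 and an int8 argmax code (0-3) in out_ptr1 for
# the max_pool2d_with_indices backward.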
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
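# Concatenation kernel for each refinement-stage input: channels [0, 38)
# come from the L1 head (in_ptr0 plus its bias in_ptr1), [38, 57) from the
# L2 head (in_ptr2 plus bias in_ptr3), and [57, 185) from the shared
# 128-channel backbone features (in_ptr4), fusing both bias adds into the
# copy. xnumel = 47360 = 4 batch * 185 channels * 8 * 8 spatial.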
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 47360
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64 % 185
x0 = xindex % 64
x2 = xindex // 11840
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 38, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 2432 * x2), tmp4 & xmask,
other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 57, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (x0 + 64 * (-38 + x1) + 1216 * x2), tmp13 &
xmask, other=0.0)
tmp15 = tl.load(in_ptr3 + (-38 + x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tl.full([1], 185, tl.int64)
tmp22 = tl.load(in_ptr4 + (x0 + 64 * (-57 + x1) + 8192 * x2), tmp19 &
xmask, other=0.0)
tmp23 = tl.where(tmp13, tmp18, tmp22)
tmp24 = tl.where(tmp4, tmp9, tmp23)
tl.store(out_ptr0 + x3, tmp24, xmask)
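# Plain bias add for the final stage-6 L1 output; no ReLU because
# 'Mconv7_stage6_L1' is in no_relu_layers. xnumel = 9728 = 4 * 38 * 8 * 8.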
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 9728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 38
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
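# Bias add + ReLU for the final stage-6 L2 output, plus a bool mask
# (activation <= 0) written into a stride-1280 padded buffer for the fused
# ReLU backward. The ReLU survives only because 'Mconv7_stage6_L1' is
# duplicated in no_relu_layers where 'Mconv7_stage6_L2' was presumably
# intended. xnumel = 4864 = 4 * 19 * 8 * 8.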
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 64 % 19
x2 = xindex // 1216
x3 = xindex % 1216
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x3 + 1280 * x2), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102,
primals_103, primals_104, primals_105, primals_106, primals_107,
primals_108, primals_109, primals_110, primals_111, primals_112,
primals_113, primals_114, primals_115, primals_116, primals_117,
primals_118, primals_119, primals_120, primals_121, primals_122,
primals_123, primals_124, primals_125, primals_126, primals_127,
primals_128, primals_129, primals_130, primals_131, primals_132,
primals_133, primals_134, primals_135, primals_136, primals_137,
primals_138, primals_139, primals_140, primals_141, primals_142,
primals_143, primals_144, primals_145, primals_146, primals_147,
primals_148, primals_149, primals_150, primals_151, primals_152,
primals_153, primals_154, primals_155, primals_156, primals_157,
primals_158, primals_159, primals_160, primals_161, primals_162,
primals_163, primals_164, primals_165, primals_166, primals_167,
primals_168, primals_169, primals_170, primals_171, primals_172,
primals_173, primals_174, primals_175, primals_176, primals_177,
primals_178, primals_179, primals_180, primals_181, primals_182,
primals_183, primals_184, primals_185) = args
args.clear()
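    # Guard: validate the shape and stride of all 185 inputs (weights,
    # biases, and the 4x3x64x64 image) against the layout the kernels were
    # compiled for before launching any work.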
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256,), (1,))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (256,), (1,))
assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_25, (128,), (1,))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128,), (1,))
assert_size_stride(primals_28, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_29, (128,), (1,))
assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (128,), (1,))
assert_size_stride(primals_32, (512, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_33, (512,), (1,))
assert_size_stride(primals_34, (38, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_35, (38,), (1,))
assert_size_stride(primals_36, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_37, (128,), (1,))
assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (128,), (1,))
assert_size_stride(primals_40, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (128,), (1,))
assert_size_stride(primals_42, (512, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_43, (512,), (1,))
assert_size_stride(primals_44, (19, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_45, (19,), (1,))
assert_size_stride(primals_46, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_47, (128,), (1,))
assert_size_stride(primals_48, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_49, (128,), (1,))
assert_size_stride(primals_50, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_51, (128,), (1,))
assert_size_stride(primals_52, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_53, (128,), (1,))
assert_size_stride(primals_54, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_55, (128,), (1,))
assert_size_stride(primals_56, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_57, (128,), (1,))
assert_size_stride(primals_58, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_59, (38,), (1,))
assert_size_stride(primals_60, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_61, (128,), (1,))
assert_size_stride(primals_62, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_63, (128,), (1,))
assert_size_stride(primals_64, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_65, (128,), (1,))
assert_size_stride(primals_66, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_67, (128,), (1,))
assert_size_stride(primals_68, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_69, (128,), (1,))
assert_size_stride(primals_70, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_71, (128,), (1,))
assert_size_stride(primals_72, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_73, (19,), (1,))
assert_size_stride(primals_74, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_75, (128,), (1,))
assert_size_stride(primals_76, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_77, (128,), (1,))
assert_size_stride(primals_78, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_79, (128,), (1,))
assert_size_stride(primals_80, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_81, (128,), (1,))
assert_size_stride(primals_82, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_83, (128,), (1,))
assert_size_stride(primals_84, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_85, (128,), (1,))
assert_size_stride(primals_86, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_87, (38,), (1,))
assert_size_stride(primals_88, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_89, (128,), (1,))
assert_size_stride(primals_90, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_91, (128,), (1,))
assert_size_stride(primals_92, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_93, (128,), (1,))
assert_size_stride(primals_94, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_95, (128,), (1,))
assert_size_stride(primals_96, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_97, (128,), (1,))
assert_size_stride(primals_98, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_99, (128,), (1,))
assert_size_stride(primals_100, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_101, (19,), (1,))
assert_size_stride(primals_102, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_103, (128,), (1,))
assert_size_stride(primals_104, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_105, (128,), (1,))
assert_size_stride(primals_106, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_107, (128,), (1,))
assert_size_stride(primals_108, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_109, (128,), (1,))
assert_size_stride(primals_110, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_111, (128,), (1,))
assert_size_stride(primals_112, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_113, (128,), (1,))
assert_size_stride(primals_114, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_115, (38,), (1,))
assert_size_stride(primals_116, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_117, (128,), (1,))
assert_size_stride(primals_118, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_119, (128,), (1,))
assert_size_stride(primals_120, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_121, (128,), (1,))
assert_size_stride(primals_122, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_123, (128,), (1,))
assert_size_stride(primals_124, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_125, (128,), (1,))
assert_size_stride(primals_126, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_127, (128,), (1,))
assert_size_stride(primals_128, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_129, (19,), (1,))
assert_size_stride(primals_130, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_131, (128,), (1,))
assert_size_stride(primals_132, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_133, (128,), (1,))
assert_size_stride(primals_134, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_135, (128,), (1,))
assert_size_stride(primals_136, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_137, (128,), (1,))
assert_size_stride(primals_138, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_139, (128,), (1,))
assert_size_stride(primals_140, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_141, (128,), (1,))
assert_size_stride(primals_142, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_143, (38,), (1,))
assert_size_stride(primals_144, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_145, (128,), (1,))
assert_size_stride(primals_146, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_147, (128,), (1,))
assert_size_stride(primals_148, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_149, (128,), (1,))
assert_size_stride(primals_150, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_151, (128,), (1,))
assert_size_stride(primals_152, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_153, (128,), (1,))
assert_size_stride(primals_154, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_155, (128,), (1,))
assert_size_stride(primals_156, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_157, (19,), (1,))
assert_size_stride(primals_158, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_159, (128,), (1,))
assert_size_stride(primals_160, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_161, (128,), (1,))
assert_size_stride(primals_162, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_163, (128,), (1,))
assert_size_stride(primals_164, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_165, (128,), (1,))
assert_size_stride(primals_166, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_167, (128,), (1,))
assert_size_stride(primals_168, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_169, (128,), (1,))
assert_size_stride(primals_170, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_171, (38,), (1,))
assert_size_stride(primals_172, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_173, (128,), (1,))
assert_size_stride(primals_174, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_175, (128,), (1,))
assert_size_stride(primals_176, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_177, (128,), (1,))
assert_size_stride(primals_178, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_179, (128,), (1,))
assert_size_stride(primals_180, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_181, (128,), (1,))
assert_size_stride(primals_182, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_183, (128,), (1,))
assert_size_stride(primals_184, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_185, (19,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
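        # Backbone (model0): fused conv+bias+ReLU kernels interleaved with
        # 2x2 max-pooling take the (4, 3, 64, 64) input down to the
        # 128-channel 8x8 feature map buf29.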
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf3, buf4,
buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_2[grid(524288)](buf7, primals_7,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_2[grid(524288)](buf9, primals_9,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf9,
buf10, buf11, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(262144)](buf13, primals_11,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_4[grid(262144)](buf15, primals_13,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_4[grid(262144)](buf17, primals_15,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_4[grid(262144)](buf19, primals_17,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(65536)](buf19,
buf20, buf21, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_6[grid(131072)](buf23, primals_19,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_19
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_6[grid(131072)](buf25, primals_21,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_21
buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 8, 8), (16384, 64, 8, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_7[grid(65536)](buf27, primals_23,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_23
buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_8[grid(32768)](buf29, primals_25,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
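        # Stage 1, L1 branch (conv5_*_CPM_L1): three 3x3 convs, a 1x1
        # expansion to 512 channels, then a 1x1 head producing the
        # 38-channel output buf38.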
buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 128, 8, 8), (8192, 64, 8, 1))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_8[grid(32768)](buf31, primals_27,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_27
buf32 = extern_kernels.convolution(buf31, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 128, 8, 8), (8192, 64, 8, 1))
buf33 = buf32
del buf32
triton_poi_fused_convolution_relu_8[grid(32768)](buf33, primals_29,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_29
buf34 = extern_kernels.convolution(buf33, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 128, 8, 8), (8192, 64, 8, 1))
buf35 = buf34
del buf34
triton_poi_fused_convolution_relu_8[grid(32768)](buf35, primals_31,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_31
buf36 = extern_kernels.convolution(buf35, primals_32, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 64, 8, 1))
buf37 = buf36
del buf36
triton_poi_fused_convolution_relu_6[grid(131072)](buf37, primals_33,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_33
buf38 = extern_kernels.convolution(buf37, primals_34, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 38, 8, 8), (2432, 64, 8, 1))
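        # Stage 1, L2 branch (conv5_*_CPM_L2): same layout, ending in a 1x1
        # head to 19 channels (buf47).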
buf39 = extern_kernels.convolution(buf29, primals_36, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 128, 8, 8), (8192, 64, 8, 1))
buf40 = buf39
del buf39
triton_poi_fused_convolution_relu_8[grid(32768)](buf40, primals_37,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_37
buf41 = extern_kernels.convolution(buf40, primals_38, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 128, 8, 8), (8192, 64, 8, 1))
buf42 = buf41
del buf41
triton_poi_fused_convolution_relu_8[grid(32768)](buf42, primals_39,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_39
buf43 = extern_kernels.convolution(buf42, primals_40, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1))
buf44 = buf43
del buf43
triton_poi_fused_convolution_relu_8[grid(32768)](buf44, primals_41,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_41
buf45 = extern_kernels.convolution(buf44, primals_42, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 512, 8, 8), (32768, 64, 8, 1))
buf46 = buf45
del buf45
triton_poi_fused_convolution_relu_6[grid(131072)](buf46, primals_43,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_43
buf47 = extern_kernels.convolution(buf46, primals_44, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 19, 8, 8), (1216, 64, 8, 1))
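        # Concatenate the 38-channel L1 output, the 19-channel L2 output and
        # the 128-channel backbone features into the 185-channel input of
        # stage 2 (38 + 19 + 128 = 185).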
buf48 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch
.float32)
triton_poi_fused_cat_9[grid(47360)](buf38, primals_35, buf47,
primals_45, buf29, buf48, 47360, XBLOCK=512, num_warps=4,
num_stages=1)
del buf38
del buf47
del primals_35
del primals_45
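        # Stages 2-6 repeat one pattern per branch: five 7x7 convs, a 1x1
        # conv, and a 1x1 head (38 channels for L1, 19 for L2), each stage
        # consuming the previous stage's 185-channel concatenation.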
buf49 = extern_kernels.convolution(buf48, primals_46, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 128, 8, 8), (8192, 64, 8, 1))
buf50 = buf49
del buf49
triton_poi_fused_convolution_relu_8[grid(32768)](buf50, primals_47,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_47
buf51 = extern_kernels.convolution(buf50, primals_48, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 128, 8, 8), (8192, 64, 8, 1))
buf52 = buf51
del buf51
triton_poi_fused_convolution_relu_8[grid(32768)](buf52, primals_49,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_49
buf53 = extern_kernels.convolution(buf52, primals_50, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 128, 8, 8), (8192, 64, 8, 1))
buf54 = buf53
del buf53
triton_poi_fused_convolution_relu_8[grid(32768)](buf54, primals_51,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_51
buf55 = extern_kernels.convolution(buf54, primals_52, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 128, 8, 8), (8192, 64, 8, 1))
buf56 = buf55
del buf55
triton_poi_fused_convolution_relu_8[grid(32768)](buf56, primals_53,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_53
buf57 = extern_kernels.convolution(buf56, primals_54, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 128, 8, 8), (8192, 64, 8, 1))
buf58 = buf57
del buf57
triton_poi_fused_convolution_relu_8[grid(32768)](buf58, primals_55,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_55
buf59 = extern_kernels.convolution(buf58, primals_56, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 128, 8, 8), (8192, 64, 8, 1))
buf60 = buf59
del buf59
triton_poi_fused_convolution_relu_8[grid(32768)](buf60, primals_57,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_57
buf61 = extern_kernels.convolution(buf60, primals_58, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 38, 8, 8), (2432, 64, 8, 1))
buf62 = extern_kernels.convolution(buf48, primals_60, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf62, (4, 128, 8, 8), (8192, 64, 8, 1))
buf63 = buf62
del buf62
triton_poi_fused_convolution_relu_8[grid(32768)](buf63, primals_61,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_61
buf64 = extern_kernels.convolution(buf63, primals_62, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 128, 8, 8), (8192, 64, 8, 1))
buf65 = buf64
del buf64
triton_poi_fused_convolution_relu_8[grid(32768)](buf65, primals_63,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_63
buf66 = extern_kernels.convolution(buf65, primals_64, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 128, 8, 8), (8192, 64, 8, 1))
buf67 = buf66
del buf66
triton_poi_fused_convolution_relu_8[grid(32768)](buf67, primals_65,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_65
buf68 = extern_kernels.convolution(buf67, primals_66, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 128, 8, 8), (8192, 64, 8, 1))
buf69 = buf68
del buf68
triton_poi_fused_convolution_relu_8[grid(32768)](buf69, primals_67,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_67
buf70 = extern_kernels.convolution(buf69, primals_68, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 128, 8, 8), (8192, 64, 8, 1))
buf71 = buf70
del buf70
triton_poi_fused_convolution_relu_8[grid(32768)](buf71, primals_69,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_69
buf72 = extern_kernels.convolution(buf71, primals_70, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf72, (4, 128, 8, 8), (8192, 64, 8, 1))
buf73 = buf72
del buf72
triton_poi_fused_convolution_relu_8[grid(32768)](buf73, primals_71,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_71
buf74 = extern_kernels.convolution(buf73, primals_72, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf74, (4, 19, 8, 8), (1216, 64, 8, 1))
buf75 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch
.float32)
triton_poi_fused_cat_9[grid(47360)](buf61, primals_59, buf74,
primals_73, buf29, buf75, 47360, XBLOCK=512, num_warps=4,
num_stages=1)
del buf61
del buf74
del primals_59
del primals_73
buf76 = extern_kernels.convolution(buf75, primals_74, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf76, (4, 128, 8, 8), (8192, 64, 8, 1))
buf77 = buf76
del buf76
triton_poi_fused_convolution_relu_8[grid(32768)](buf77, primals_75,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_75
buf78 = extern_kernels.convolution(buf77, primals_76, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf78, (4, 128, 8, 8), (8192, 64, 8, 1))
buf79 = buf78
del buf78
triton_poi_fused_convolution_relu_8[grid(32768)](buf79, primals_77,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_77
buf80 = extern_kernels.convolution(buf79, primals_78, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf80, (4, 128, 8, 8), (8192, 64, 8, 1))
buf81 = buf80
del buf80
triton_poi_fused_convolution_relu_8[grid(32768)](buf81, primals_79,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_79
buf82 = extern_kernels.convolution(buf81, primals_80, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf82, (4, 128, 8, 8), (8192, 64, 8, 1))
buf83 = buf82
del buf82
triton_poi_fused_convolution_relu_8[grid(32768)](buf83, primals_81,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_81
buf84 = extern_kernels.convolution(buf83, primals_82, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 128, 8, 8), (8192, 64, 8, 1))
buf85 = buf84
del buf84
triton_poi_fused_convolution_relu_8[grid(32768)](buf85, primals_83,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_83
buf86 = extern_kernels.convolution(buf85, primals_84, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 128, 8, 8), (8192, 64, 8, 1))
buf87 = buf86
del buf86
triton_poi_fused_convolution_relu_8[grid(32768)](buf87, primals_85,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_85
buf88 = extern_kernels.convolution(buf87, primals_86, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf88, (4, 38, 8, 8), (2432, 64, 8, 1))
buf89 = extern_kernels.convolution(buf75, primals_88, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf89, (4, 128, 8, 8), (8192, 64, 8, 1))
buf90 = buf89
del buf89
triton_poi_fused_convolution_relu_8[grid(32768)](buf90, primals_89,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_89
buf91 = extern_kernels.convolution(buf90, primals_90, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf91, (4, 128, 8, 8), (8192, 64, 8, 1))
buf92 = buf91
del buf91
triton_poi_fused_convolution_relu_8[grid(32768)](buf92, primals_91,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_91
buf93 = extern_kernels.convolution(buf92, primals_92, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf93, (4, 128, 8, 8), (8192, 64, 8, 1))
buf94 = buf93
del buf93
triton_poi_fused_convolution_relu_8[grid(32768)](buf94, primals_93,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_93
buf95 = extern_kernels.convolution(buf94, primals_94, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf95, (4, 128, 8, 8), (8192, 64, 8, 1))
buf96 = buf95
del buf95
triton_poi_fused_convolution_relu_8[grid(32768)](buf96, primals_95,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_95
buf97 = extern_kernels.convolution(buf96, primals_96, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf97, (4, 128, 8, 8), (8192, 64, 8, 1))
buf98 = buf97
del buf97
triton_poi_fused_convolution_relu_8[grid(32768)](buf98, primals_97,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_97
buf99 = extern_kernels.convolution(buf98, primals_98, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 128, 8, 8), (8192, 64, 8, 1))
buf100 = buf99
del buf99
triton_poi_fused_convolution_relu_8[grid(32768)](buf100, primals_99,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_99
buf101 = extern_kernels.convolution(buf100, primals_100, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 19, 8, 8), (1216, 64, 8, 1))
buf102 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1),
torch.float32)
triton_poi_fused_cat_9[grid(47360)](buf88, primals_87, buf101,
primals_101, buf29, buf102, 47360, XBLOCK=512, num_warps=4,
num_stages=1)
del buf101
del buf88
del primals_101
del primals_87
buf103 = extern_kernels.convolution(buf102, primals_102, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf103, (4, 128, 8, 8), (8192, 64, 8, 1))
buf104 = buf103
del buf103
triton_poi_fused_convolution_relu_8[grid(32768)](buf104,
primals_103, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_103
buf105 = extern_kernels.convolution(buf104, primals_104, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf105, (4, 128, 8, 8), (8192, 64, 8, 1))
buf106 = buf105
del buf105
triton_poi_fused_convolution_relu_8[grid(32768)](buf106,
primals_105, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_105
buf107 = extern_kernels.convolution(buf106, primals_106, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf107, (4, 128, 8, 8), (8192, 64, 8, 1))
buf108 = buf107
del buf107
triton_poi_fused_convolution_relu_8[grid(32768)](buf108,
primals_107, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_107
buf109 = extern_kernels.convolution(buf108, primals_108, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 128, 8, 8), (8192, 64, 8, 1))
buf110 = buf109
del buf109
triton_poi_fused_convolution_relu_8[grid(32768)](buf110,
primals_109, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_109
buf111 = extern_kernels.convolution(buf110, primals_110, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf111, (4, 128, 8, 8), (8192, 64, 8, 1))
buf112 = buf111
del buf111
triton_poi_fused_convolution_relu_8[grid(32768)](buf112,
primals_111, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_111
buf113 = extern_kernels.convolution(buf112, primals_112, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf113, (4, 128, 8, 8), (8192, 64, 8, 1))
buf114 = buf113
del buf113
triton_poi_fused_convolution_relu_8[grid(32768)](buf114,
primals_113, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_113
buf115 = extern_kernels.convolution(buf114, primals_114, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 38, 8, 8), (2432, 64, 8, 1))
buf116 = extern_kernels.convolution(buf102, primals_116, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf116, (4, 128, 8, 8), (8192, 64, 8, 1))
buf117 = buf116
del buf116
triton_poi_fused_convolution_relu_8[grid(32768)](buf117,
primals_117, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_117
buf118 = extern_kernels.convolution(buf117, primals_118, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 128, 8, 8), (8192, 64, 8, 1))
buf119 = buf118
del buf118
triton_poi_fused_convolution_relu_8[grid(32768)](buf119,
primals_119, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_119
buf120 = extern_kernels.convolution(buf119, primals_120, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf120, (4, 128, 8, 8), (8192, 64, 8, 1))
buf121 = buf120
del buf120
triton_poi_fused_convolution_relu_8[grid(32768)](buf121,
primals_121, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_121
buf122 = extern_kernels.convolution(buf121, primals_122, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf122, (4, 128, 8, 8), (8192, 64, 8, 1))
buf123 = buf122
del buf122
triton_poi_fused_convolution_relu_8[grid(32768)](buf123,
primals_123, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_123
buf124 = extern_kernels.convolution(buf123, primals_124, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf124, (4, 128, 8, 8), (8192, 64, 8, 1))
buf125 = buf124
del buf124
triton_poi_fused_convolution_relu_8[grid(32768)](buf125,
primals_125, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_125
buf126 = extern_kernels.convolution(buf125, primals_126, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf126, (4, 128, 8, 8), (8192, 64, 8, 1))
buf127 = buf126
del buf126
triton_poi_fused_convolution_relu_8[grid(32768)](buf127,
primals_127, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_127
buf128 = extern_kernels.convolution(buf127, primals_128, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf128, (4, 19, 8, 8), (1216, 64, 8, 1))
buf129 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1),
torch.float32)
triton_poi_fused_cat_9[grid(47360)](buf115, primals_115, buf128,
primals_129, buf29, buf129, 47360, XBLOCK=512, num_warps=4,
num_stages=1)
del buf115
del buf128
del primals_115
del primals_129
buf130 = extern_kernels.convolution(buf129, primals_130, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf130, (4, 128, 8, 8), (8192, 64, 8, 1))
buf131 = buf130
del buf130
triton_poi_fused_convolution_relu_8[grid(32768)](buf131,
primals_131, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_131
buf132 = extern_kernels.convolution(buf131, primals_132, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf132, (4, 128, 8, 8), (8192, 64, 8, 1))
buf133 = buf132
del buf132
triton_poi_fused_convolution_relu_8[grid(32768)](buf133,
primals_133, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_133
buf134 = extern_kernels.convolution(buf133, primals_134, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf134, (4, 128, 8, 8), (8192, 64, 8, 1))
buf135 = buf134
del buf134
triton_poi_fused_convolution_relu_8[grid(32768)](buf135,
primals_135, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_135
buf136 = extern_kernels.convolution(buf135, primals_136, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf136, (4, 128, 8, 8), (8192, 64, 8, 1))
buf137 = buf136
del buf136
triton_poi_fused_convolution_relu_8[grid(32768)](buf137,
primals_137, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_137
buf138 = extern_kernels.convolution(buf137, primals_138, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf138, (4, 128, 8, 8), (8192, 64, 8, 1))
buf139 = buf138
del buf138
triton_poi_fused_convolution_relu_8[grid(32768)](buf139,
primals_139, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_139
buf140 = extern_kernels.convolution(buf139, primals_140, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf140, (4, 128, 8, 8), (8192, 64, 8, 1))
buf141 = buf140
del buf140
triton_poi_fused_convolution_relu_8[grid(32768)](buf141,
primals_141, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_141
buf142 = extern_kernels.convolution(buf141, primals_142, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf142, (4, 38, 8, 8), (2432, 64, 8, 1))
buf143 = extern_kernels.convolution(buf129, primals_144, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf143, (4, 128, 8, 8), (8192, 64, 8, 1))
buf144 = buf143
del buf143
triton_poi_fused_convolution_relu_8[grid(32768)](buf144,
primals_145, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_145
buf145 = extern_kernels.convolution(buf144, primals_146, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf145, (4, 128, 8, 8), (8192, 64, 8, 1))
buf146 = buf145
del buf145
triton_poi_fused_convolution_relu_8[grid(32768)](buf146,
primals_147, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_147
buf147 = extern_kernels.convolution(buf146, primals_148, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf147, (4, 128, 8, 8), (8192, 64, 8, 1))
buf148 = buf147
del buf147
triton_poi_fused_convolution_relu_8[grid(32768)](buf148,
primals_149, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_149
buf149 = extern_kernels.convolution(buf148, primals_150, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf149, (4, 128, 8, 8), (8192, 64, 8, 1))
buf150 = buf149
del buf149
triton_poi_fused_convolution_relu_8[grid(32768)](buf150,
primals_151, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_151
buf151 = extern_kernels.convolution(buf150, primals_152, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf151, (4, 128, 8, 8), (8192, 64, 8, 1))
buf152 = buf151
del buf151
triton_poi_fused_convolution_relu_8[grid(32768)](buf152,
primals_153, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_153
buf153 = extern_kernels.convolution(buf152, primals_154, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf153, (4, 128, 8, 8), (8192, 64, 8, 1))
buf154 = buf153
del buf153
triton_poi_fused_convolution_relu_8[grid(32768)](buf154,
primals_155, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_155
buf155 = extern_kernels.convolution(buf154, primals_156, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf155, (4, 19, 8, 8), (1216, 64, 8, 1))
buf156 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1),
torch.float32)
triton_poi_fused_cat_9[grid(47360)](buf142, primals_143, buf155,
primals_157, buf29, buf156, 47360, XBLOCK=512, num_warps=4,
num_stages=1)
del buf142
del buf155
del primals_143
del primals_157
buf157 = extern_kernels.convolution(buf156, primals_158, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf157, (4, 128, 8, 8), (8192, 64, 8, 1))
buf158 = buf157
del buf157
triton_poi_fused_convolution_relu_8[grid(32768)](buf158,
primals_159, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_159
buf159 = extern_kernels.convolution(buf158, primals_160, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf159, (4, 128, 8, 8), (8192, 64, 8, 1))
buf160 = buf159
del buf159
triton_poi_fused_convolution_relu_8[grid(32768)](buf160,
primals_161, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_161
buf161 = extern_kernels.convolution(buf160, primals_162, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf161, (4, 128, 8, 8), (8192, 64, 8, 1))
buf162 = buf161
del buf161
triton_poi_fused_convolution_relu_8[grid(32768)](buf162,
primals_163, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_163
buf163 = extern_kernels.convolution(buf162, primals_164, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf163, (4, 128, 8, 8), (8192, 64, 8, 1))
buf164 = buf163
del buf163
triton_poi_fused_convolution_relu_8[grid(32768)](buf164,
primals_165, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_165
buf165 = extern_kernels.convolution(buf164, primals_166, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 128, 8, 8), (8192, 64, 8, 1))
buf166 = buf165
del buf165
triton_poi_fused_convolution_relu_8[grid(32768)](buf166,
primals_167, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_167
buf167 = extern_kernels.convolution(buf166, primals_168, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf167, (4, 128, 8, 8), (8192, 64, 8, 1))
buf168 = buf167
del buf167
triton_poi_fused_convolution_relu_8[grid(32768)](buf168,
primals_169, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_169
buf169 = extern_kernels.convolution(buf168, primals_170, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf169, (4, 38, 8, 8), (2432, 64, 8, 1))
buf170 = buf169
del buf169
triton_poi_fused_convolution_10[grid(9728)](buf170, primals_171,
9728, XBLOCK=256, num_warps=4, num_stages=1)
del primals_171
buf171 = extern_kernels.convolution(buf156, primals_172, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf171, (4, 128, 8, 8), (8192, 64, 8, 1))
buf172 = buf171
del buf171
triton_poi_fused_convolution_relu_8[grid(32768)](buf172,
primals_173, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_173
buf173 = extern_kernels.convolution(buf172, primals_174, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf173, (4, 128, 8, 8), (8192, 64, 8, 1))
buf174 = buf173
del buf173
triton_poi_fused_convolution_relu_8[grid(32768)](buf174,
primals_175, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_175
buf175 = extern_kernels.convolution(buf174, primals_176, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf175, (4, 128, 8, 8), (8192, 64, 8, 1))
buf176 = buf175
del buf175
triton_poi_fused_convolution_relu_8[grid(32768)](buf176,
primals_177, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_177
buf177 = extern_kernels.convolution(buf176, primals_178, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf177, (4, 128, 8, 8), (8192, 64, 8, 1))
buf178 = buf177
del buf177
triton_poi_fused_convolution_relu_8[grid(32768)](buf178,
primals_179, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_179
buf179 = extern_kernels.convolution(buf178, primals_180, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf179, (4, 128, 8, 8), (8192, 64, 8, 1))
buf180 = buf179
del buf179
triton_poi_fused_convolution_relu_8[grid(32768)](buf180,
primals_181, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_181
buf181 = extern_kernels.convolution(buf180, primals_182, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf181, (4, 128, 8, 8), (8192, 64, 8, 1))
buf182 = buf181
del buf181
triton_poi_fused_convolution_relu_8[grid(32768)](buf182,
primals_183, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_183
buf183 = extern_kernels.convolution(buf182, primals_184, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf183, (4, 19, 8, 8), (1216, 64, 8, 1))
buf184 = buf183
del buf183
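        # Fused bias + ReLU on the final L2 head; buf185 stores the boolean
        # activation mask that the threshold_backward kernel keeps for the
        # backward pass.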
buf185 = empty_strided_cuda((4, 19, 8, 8), (1280, 64, 8, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_11[grid(4864)](
buf184, primals_185, buf185, 4864, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_185
return (buf170, buf184, primals_1, primals_3, primals_4, primals_6,
primals_8, primals_10, primals_12, primals_14, primals_16,
primals_18, primals_20, primals_22, primals_24, primals_26,
primals_28, primals_30, primals_32, primals_34, primals_36,
primals_38, primals_40, primals_42, primals_44, primals_46,
primals_48, primals_50, primals_52, primals_54, primals_56,
primals_58, primals_60, primals_62, primals_64, primals_66,
primals_68, primals_70, primals_72, primals_74, primals_76,
primals_78, primals_80, primals_82, primals_84, primals_86,
primals_88, primals_90, primals_92, primals_94, primals_96,
primals_98, primals_100, primals_102, primals_104, primals_106,
primals_108, primals_110, primals_112, primals_114, primals_116,
primals_118, primals_120, primals_122, primals_124, primals_126,
primals_128, primals_130, primals_132, primals_134, primals_136,
primals_138, primals_140, primals_142, primals_144, primals_146,
primals_148, primals_150, primals_152, primals_154, primals_156,
primals_158, primals_160, primals_162, primals_164, primals_166,
primals_168, primals_170, primals_172, primals_174, primals_176,
primals_178, primals_180, primals_182, primals_184, buf1, buf3,
buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19,
buf20, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35,
buf37, buf40, buf42, buf44, buf46, buf48, buf50, buf52, buf54,
buf56, buf58, buf60, buf63, buf65, buf67, buf69, buf71, buf73,
buf75, buf77, buf79, buf81, buf83, buf85, buf87, buf90, buf92,
buf94, buf96, buf98, buf100, buf102, buf104, buf106, buf108, buf110,
buf112, buf114, buf117, buf119, buf121, buf123, buf125, buf127,
buf129, buf131, buf133, buf135, buf137, buf139, buf141, buf144,
buf146, buf148, buf150, buf152, buf154, buf156, buf158, buf160,
buf162, buf164, buf166, buf168, buf172, buf174, buf176, buf178,
buf180, buf182, buf185)
from collections import OrderedDict
def _make_layers(block, no_relu_layers):
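    # Build a named Sequential from an OrderedDict spec: pool entries are
    # [kernel, stride, padding]; conv entries are [in_ch, out_ch, kernel,
    # stride, padding]. A ReLU follows every conv not listed in
    # no_relu_layers.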
layers = []
for layer_name, v in block.items():
if 'pool' in layer_name:
layer = torch.nn.MaxPool2d(kernel_size=v[0], stride=v[1],
padding=v[2])
layers.append((layer_name, layer))
else:
conv2d = torch.nn.Conv2d(in_channels=v[0], out_channels=v[1],
kernel_size=v[2], stride=v[3], padding=v[4])
layers.append((layer_name, conv2d))
if layer_name not in no_relu_layers:
layers.append(('relu_' + layer_name, torch.nn.ReLU(inplace=
True)))
return torch.nn.Sequential(OrderedDict(layers))
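# OpenPose-style body-pose network driven by the inductor-compiled `call`
# above: a VGG-like backbone (model0) feeds six refinement stages, each with
# an L1 branch (38-channel part-affinity fields) and an L2 branch
# (19-channel keypoint heatmaps).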
class BodyPoseModelNew(torch.nn.Module):
def __init__(self):
super(BodyPoseModelNew, self).__init__()
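        # NOTE: the final entry repeats 'Mconv7_stage6_L1'; 'Mconv7_stage6_L2'
        # is therefore not excluded and keeps a trailing ReLU, matching the
        # fused ReLU applied to the final L2 head (buf184) in `call` above.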
no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2',
'Mconv7_stage2_L1', 'Mconv7_stage2_L2', 'Mconv7_stage3_L1',
'Mconv7_stage3_L2', 'Mconv7_stage4_L1', 'Mconv7_stage4_L2',
'Mconv7_stage5_L1', 'Mconv7_stage5_L2', 'Mconv7_stage6_L1',
'Mconv7_stage6_L1']
blocks = {}
block0 = OrderedDict({'conv1_1': [3, 64, 3, 1, 1], 'conv1_2': [64,
64, 3, 1, 1], 'pool1_stage1': [2, 2, 0], 'conv2_1': [64, 128, 3,
1, 1], 'conv2_2': [128, 128, 3, 1, 1], 'pool2_stage1': [2, 2, 0
], 'conv3_1': [128, 256, 3, 1, 1], 'conv3_2': [256, 256, 3, 1,
1], 'conv3_3': [256, 256, 3, 1, 1], 'conv3_4': [256, 256, 3, 1,
1], 'pool3_stage1': [2, 2, 0], 'conv4_1': [256, 512, 3, 1, 1],
'conv4_2': [512, 512, 3, 1, 1], 'conv4_3_CPM': [512, 256, 3, 1,
1], 'conv4_4_CPM': [256, 128, 3, 1, 1]})
block1_1 = OrderedDict({'conv5_1_CPM_L1': [128, 128, 3, 1, 1],
'conv5_2_CPM_L1': [128, 128, 3, 1, 1], 'conv5_3_CPM_L1': [128,
128, 3, 1, 1], 'conv5_4_CPM_L1': [128, 512, 1, 1, 0],
'conv5_5_CPM_L1': [512, 38, 1, 1, 0]})
block1_2 = OrderedDict({'conv5_1_CPM_L2': [128, 128, 3, 1, 1],
'conv5_2_CPM_L2': [128, 128, 3, 1, 1], 'conv5_3_CPM_L2': [128,
128, 3, 1, 1], 'conv5_4_CPM_L2': [128, 512, 1, 1, 0],
'conv5_5_CPM_L2': [512, 19, 1, 1, 0]})
blocks['block1_1'] = block1_1
blocks['block1_2'] = block1_2
self.model0 = _make_layers(block0, no_relu_layers)
for i in range(2, 7):
blocks['block%d_1' % i] = OrderedDict({('Mconv1_stage%d_L1' % i
): [185, 128, 7, 1, 3], ('Mconv2_stage%d_L1' % i): [128,
128, 7, 1, 3], ('Mconv3_stage%d_L1' % i): [128, 128, 7, 1,
3], ('Mconv4_stage%d_L1' % i): [128, 128, 7, 1, 3], (
'Mconv5_stage%d_L1' % i): [128, 128, 7, 1, 3], (
'Mconv6_stage%d_L1' % i): [128, 128, 1, 1, 0], (
'Mconv7_stage%d_L1' % i): [128, 38, 1, 1, 0]})
blocks['block%d_2' % i] = OrderedDict({('Mconv1_stage%d_L2' % i
): [185, 128, 7, 1, 3], ('Mconv2_stage%d_L2' % i): [128,
128, 7, 1, 3], ('Mconv3_stage%d_L2' % i): [128, 128, 7, 1,
3], ('Mconv4_stage%d_L2' % i): [128, 128, 7, 1, 3], (
'Mconv5_stage%d_L2' % i): [128, 128, 7, 1, 3], (
'Mconv6_stage%d_L2' % i): [128, 128, 1, 1, 0], (
'Mconv7_stage%d_L2' % i): [128, 19, 1, 1, 0]})
for k in blocks.keys():
blocks[k] = _make_layers(blocks[k], no_relu_layers)
self.model1_1 = blocks['block1_1']
self.model2_1 = blocks['block2_1']
self.model3_1 = blocks['block3_1']
self.model4_1 = blocks['block4_1']
self.model5_1 = blocks['block5_1']
self.model6_1 = blocks['block6_1']
self.model1_2 = blocks['block1_2']
self.model2_2 = blocks['block2_2']
self.model3_2 = blocks['block3_2']
self.model4_2 = blocks['block4_2']
self.model5_2 = blocks['block5_2']
self.model6_2 = blocks['block6_2']
def forward(self, input_0):
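        # Gather module parameters into the positional primals_* slots of
        # `call`; each index must match the assert_size_stride order at the
        # top of `call` (weight/bias pairs per conv, stage by stage).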
primals_1 = self.model0.conv1_1.weight
primals_2 = self.model0.conv1_1.bias
primals_4 = self.model0.conv1_2.weight
primals_5 = self.model0.conv1_2.bias
primals_6 = self.model0.conv2_1.weight
primals_7 = self.model0.conv2_1.bias
primals_8 = self.model0.conv2_2.weight
primals_9 = self.model0.conv2_2.bias
primals_10 = self.model0.conv3_1.weight
primals_11 = self.model0.conv3_1.bias
primals_12 = self.model0.conv3_2.weight
primals_13 = self.model0.conv3_2.bias
primals_14 = self.model0.conv3_3.weight
primals_15 = self.model0.conv3_3.bias
primals_16 = self.model0.conv3_4.weight
primals_17 = self.model0.conv3_4.bias
primals_18 = self.model0.conv4_1.weight
primals_19 = self.model0.conv4_1.bias
primals_20 = self.model0.conv4_2.weight
primals_21 = self.model0.conv4_2.bias
primals_22 = self.model0.conv4_3_CPM.weight
primals_23 = self.model0.conv4_3_CPM.bias
primals_24 = self.model0.conv4_4_CPM.weight
primals_25 = self.model0.conv4_4_CPM.bias
primals_26 = self.model1_1.conv5_1_CPM_L1.weight
primals_27 = self.model1_1.conv5_1_CPM_L1.bias
primals_28 = self.model1_1.conv5_2_CPM_L1.weight
primals_29 = self.model1_1.conv5_2_CPM_L1.bias
primals_30 = self.model1_1.conv5_3_CPM_L1.weight
primals_31 = self.model1_1.conv5_3_CPM_L1.bias
primals_32 = self.model1_1.conv5_4_CPM_L1.weight
primals_33 = self.model1_1.conv5_4_CPM_L1.bias
primals_34 = self.model1_1.conv5_5_CPM_L1.weight
primals_35 = self.model1_1.conv5_5_CPM_L1.bias
        primals_46 = self.model2_1.Mconv1_stage2_L1.weight
        primals_47 = self.model2_1.Mconv1_stage2_L1.bias
        primals_48 = self.model2_1.Mconv2_stage2_L1.weight
        primals_49 = self.model2_1.Mconv2_stage2_L1.bias
        primals_50 = self.model2_1.Mconv3_stage2_L1.weight
        primals_51 = self.model2_1.Mconv3_stage2_L1.bias
        primals_52 = self.model2_1.Mconv4_stage2_L1.weight
        primals_53 = self.model2_1.Mconv4_stage2_L1.bias
        primals_54 = self.model2_1.Mconv5_stage2_L1.weight
        primals_55 = self.model2_1.Mconv5_stage2_L1.bias
        primals_56 = self.model2_1.Mconv6_stage2_L1.weight
        primals_57 = self.model2_1.Mconv6_stage2_L1.bias
        primals_58 = self.model2_1.Mconv7_stage2_L1.weight
        primals_59 = self.model2_1.Mconv7_stage2_L1.bias
        primals_74 = self.model3_1.Mconv1_stage3_L1.weight
        primals_75 = self.model3_1.Mconv1_stage3_L1.bias
        primals_76 = self.model3_1.Mconv2_stage3_L1.weight
        primals_77 = self.model3_1.Mconv2_stage3_L1.bias
        primals_78 = self.model3_1.Mconv3_stage3_L1.weight
        primals_79 = self.model3_1.Mconv3_stage3_L1.bias
        primals_80 = self.model3_1.Mconv4_stage3_L1.weight
        primals_81 = self.model3_1.Mconv4_stage3_L1.bias
        primals_82 = self.model3_1.Mconv5_stage3_L1.weight
        primals_83 = self.model3_1.Mconv5_stage3_L1.bias
        primals_84 = self.model3_1.Mconv6_stage3_L1.weight
        primals_85 = self.model3_1.Mconv6_stage3_L1.bias
        primals_86 = self.model3_1.Mconv7_stage3_L1.weight
        primals_87 = self.model3_1.Mconv7_stage3_L1.bias
        primals_102 = self.model4_1.Mconv1_stage4_L1.weight
        primals_103 = self.model4_1.Mconv1_stage4_L1.bias
        primals_104 = self.model4_1.Mconv2_stage4_L1.weight
        primals_105 = self.model4_1.Mconv2_stage4_L1.bias
        primals_106 = self.model4_1.Mconv3_stage4_L1.weight
        primals_107 = self.model4_1.Mconv3_stage4_L1.bias
        primals_108 = self.model4_1.Mconv4_stage4_L1.weight
        primals_109 = self.model4_1.Mconv4_stage4_L1.bias
        primals_110 = self.model4_1.Mconv5_stage4_L1.weight
        primals_111 = self.model4_1.Mconv5_stage4_L1.bias
        primals_112 = self.model4_1.Mconv6_stage4_L1.weight
        primals_113 = self.model4_1.Mconv6_stage4_L1.bias
        primals_114 = self.model4_1.Mconv7_stage4_L1.weight
        primals_115 = self.model4_1.Mconv7_stage4_L1.bias
        primals_130 = self.model5_1.Mconv1_stage5_L1.weight
        primals_131 = self.model5_1.Mconv1_stage5_L1.bias
        primals_132 = self.model5_1.Mconv2_stage5_L1.weight
        primals_133 = self.model5_1.Mconv2_stage5_L1.bias
        primals_134 = self.model5_1.Mconv3_stage5_L1.weight
        primals_135 = self.model5_1.Mconv3_stage5_L1.bias
        primals_136 = self.model5_1.Mconv4_stage5_L1.weight
        primals_137 = self.model5_1.Mconv4_stage5_L1.bias
        primals_138 = self.model5_1.Mconv5_stage5_L1.weight
        primals_139 = self.model5_1.Mconv5_stage5_L1.bias
        primals_140 = self.model5_1.Mconv6_stage5_L1.weight
        primals_141 = self.model5_1.Mconv6_stage5_L1.bias
        primals_142 = self.model5_1.Mconv7_stage5_L1.weight
        primals_143 = self.model5_1.Mconv7_stage5_L1.bias
        primals_158 = self.model6_1.Mconv1_stage6_L1.weight
        primals_159 = self.model6_1.Mconv1_stage6_L1.bias
        primals_160 = self.model6_1.Mconv2_stage6_L1.weight
        primals_161 = self.model6_1.Mconv2_stage6_L1.bias
        primals_162 = self.model6_1.Mconv3_stage6_L1.weight
        primals_163 = self.model6_1.Mconv3_stage6_L1.bias
        primals_164 = self.model6_1.Mconv4_stage6_L1.weight
        primals_165 = self.model6_1.Mconv4_stage6_L1.bias
        primals_166 = self.model6_1.Mconv5_stage6_L1.weight
        primals_167 = self.model6_1.Mconv5_stage6_L1.bias
        primals_168 = self.model6_1.Mconv6_stage6_L1.weight
        primals_169 = self.model6_1.Mconv6_stage6_L1.bias
        primals_170 = self.model6_1.Mconv7_stage6_L1.weight
        primals_171 = self.model6_1.Mconv7_stage6_L1.bias
        primals_36 = self.model1_2.conv5_1_CPM_L2.weight
        primals_37 = self.model1_2.conv5_1_CPM_L2.bias
        primals_38 = self.model1_2.conv5_2_CPM_L2.weight
        primals_39 = self.model1_2.conv5_2_CPM_L2.bias
        primals_40 = self.model1_2.conv5_3_CPM_L2.weight
        primals_41 = self.model1_2.conv5_3_CPM_L2.bias
        primals_42 = self.model1_2.conv5_4_CPM_L2.weight
        primals_43 = self.model1_2.conv5_4_CPM_L2.bias
        primals_44 = self.model1_2.conv5_5_CPM_L2.weight
        primals_45 = self.model1_2.conv5_5_CPM_L2.bias
        primals_60 = self.model2_2.Mconv1_stage2_L2.weight
        primals_61 = self.model2_2.Mconv1_stage2_L2.bias
        primals_62 = self.model2_2.Mconv2_stage2_L2.weight
        primals_63 = self.model2_2.Mconv2_stage2_L2.bias
        primals_64 = self.model2_2.Mconv3_stage2_L2.weight
        primals_65 = self.model2_2.Mconv3_stage2_L2.bias
        primals_66 = self.model2_2.Mconv4_stage2_L2.weight
        primals_67 = self.model2_2.Mconv4_stage2_L2.bias
        primals_68 = self.model2_2.Mconv5_stage2_L2.weight
        primals_69 = self.model2_2.Mconv5_stage2_L2.bias
        primals_70 = self.model2_2.Mconv6_stage2_L2.weight
        primals_71 = self.model2_2.Mconv6_stage2_L2.bias
        primals_72 = self.model2_2.Mconv7_stage2_L2.weight
        primals_73 = self.model2_2.Mconv7_stage2_L2.bias
        primals_88 = self.model3_2.Mconv1_stage3_L2.weight
        primals_89 = self.model3_2.Mconv1_stage3_L2.bias
        primals_90 = self.model3_2.Mconv2_stage3_L2.weight
        primals_91 = self.model3_2.Mconv2_stage3_L2.bias
        primals_92 = self.model3_2.Mconv3_stage3_L2.weight
        primals_93 = self.model3_2.Mconv3_stage3_L2.bias
        primals_94 = self.model3_2.Mconv4_stage3_L2.weight
        primals_95 = self.model3_2.Mconv4_stage3_L2.bias
        primals_96 = self.model3_2.Mconv5_stage3_L2.weight
        primals_97 = self.model3_2.Mconv5_stage3_L2.bias
        primals_98 = self.model3_2.Mconv6_stage3_L2.weight
        primals_99 = self.model3_2.Mconv6_stage3_L2.bias
        primals_100 = self.model3_2.Mconv7_stage3_L2.weight
        primals_101 = self.model3_2.Mconv7_stage3_L2.bias
        primals_116 = self.model4_2.Mconv1_stage4_L2.weight
        primals_117 = self.model4_2.Mconv1_stage4_L2.bias
        primals_118 = self.model4_2.Mconv2_stage4_L2.weight
        primals_119 = self.model4_2.Mconv2_stage4_L2.bias
        primals_120 = self.model4_2.Mconv3_stage4_L2.weight
        primals_121 = self.model4_2.Mconv3_stage4_L2.bias
        primals_122 = self.model4_2.Mconv4_stage4_L2.weight
        primals_123 = self.model4_2.Mconv4_stage4_L2.bias
        primals_124 = self.model4_2.Mconv5_stage4_L2.weight
        primals_125 = self.model4_2.Mconv5_stage4_L2.bias
        primals_126 = self.model4_2.Mconv6_stage4_L2.weight
        primals_127 = self.model4_2.Mconv6_stage4_L2.bias
        primals_128 = self.model4_2.Mconv7_stage4_L2.weight
        primals_129 = self.model4_2.Mconv7_stage4_L2.bias
        primals_144 = self.model5_2.Mconv1_stage5_L2.weight
        primals_145 = self.model5_2.Mconv1_stage5_L2.bias
        primals_146 = self.model5_2.Mconv2_stage5_L2.weight
        primals_147 = self.model5_2.Mconv2_stage5_L2.bias
        primals_148 = self.model5_2.Mconv3_stage5_L2.weight
        primals_149 = self.model5_2.Mconv3_stage5_L2.bias
        primals_150 = self.model5_2.Mconv4_stage5_L2.weight
        primals_151 = self.model5_2.Mconv4_stage5_L2.bias
        primals_152 = self.model5_2.Mconv5_stage5_L2.weight
        primals_153 = self.model5_2.Mconv5_stage5_L2.bias
        primals_154 = self.model5_2.Mconv6_stage5_L2.weight
        primals_155 = self.model5_2.Mconv6_stage5_L2.bias
        primals_156 = self.model5_2.Mconv7_stage5_L2.weight
        primals_157 = self.model5_2.Mconv7_stage5_L2.bias
primals_172 = self.model6_2.Mconv1_stage6_L2.weight
primals_173 = self.model6_2.Mconv1_stage6_L2.bias
primals_174 = self.model6_2.Mconv2_stage6_L2.weight
primals_175 = self.model6_2.Mconv2_stage6_L2.bias
primals_176 = self.model6_2.Mconv3_stage6_L2.weight
primals_177 = self.model6_2.Mconv3_stage6_L2.bias
primals_178 = self.model6_2.Mconv4_stage6_L2.weight
primals_179 = self.model6_2.Mconv4_stage6_L2.bias
primals_180 = self.model6_2.Mconv5_stage6_L2.weight
primals_181 = self.model6_2.Mconv5_stage6_L2.bias
primals_182 = self.model6_2.Mconv6_stage6_L2.weight
primals_183 = self.model6_2.Mconv6_stage6_L2.bias
primals_184 = self.model6_2.Mconv7_stage6_L2.weight
primals_185 = self.model6_2.Mconv7_stage6_L2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102, primals_103, primals_104,
primals_105, primals_106, primals_107, primals_108, primals_109,
primals_110, primals_111, primals_112, primals_113, primals_114,
primals_115, primals_116, primals_117, primals_118, primals_119,
primals_120, primals_121, primals_122, primals_123, primals_124,
primals_125, primals_126, primals_127, primals_128, primals_129,
primals_130, primals_131, primals_132, primals_133, primals_134,
primals_135, primals_136, primals_137, primals_138, primals_139,
primals_140, primals_141, primals_142, primals_143, primals_144,
primals_145, primals_146, primals_147, primals_148, primals_149,
primals_150, primals_151, primals_152, primals_153, primals_154,
primals_155, primals_156, primals_157, primals_158, primals_159,
primals_160, primals_161, primals_162, primals_163, primals_164,
primals_165, primals_166, primals_167, primals_168, primals_169,
primals_170, primals_171, primals_172, primals_173, primals_174,
primals_175, primals_176, primals_177, primals_178, primals_179,
primals_180, primals_181, primals_182, primals_183, primals_184,
primals_185])
return output[0], output[1]
| pento-group/terran | BodyPoseModel | false | 16,460 | [
"BSD-3-Clause"
] | 62 | 983f18521b149749c944e3b29c86361cb1ecf3a5 | https://github.com/pento-group/terran/tree/983f18521b149749c944e3b29c86361cb1ecf3a5 |
LASympNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/m4/cm4im4jwwbumfxfgwme4weq57wui2tnw6y37wp5yegnjl4xmn5jd.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %permute), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2, 2], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2
xnumel = 2
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + (2*y0)), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (y0 + (2*x1)), xmask & ymask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x1 + (2*y0)), tmp2, xmask & ymask)
''', device_str='cuda')
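# Eager-mode equivalent of the kernel above (illustrative): out = S + S.t()
# for each 2x2 parameter matrix S.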
# kernel path: runs/run_shard_0/inductor_cache/ts/ctshmjxiubxutvmsloys6a5fvrpex5qymvu4frsjsx7ayrcuit7l.py
# Topologically Sorted Source Nodes: [p_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# p_1 => add_1
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_1, %view_1), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gw/cgwned4evusqkcqe6uszpydjo4c3ukukkcsuzx335co4gztk2er7.py
# Topologically Sorted Source Nodes: [h, q_1, mul_3, q_2], Original ATen: [aten.ones_like, aten.add, aten.mul]
# Source node to ATen node mapping:
# h => full_default
# mul_3 => mul_3
# q_1 => add_3
# q_2 => add_5
# Graph fragment:
# %full_default : [num_users=6] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %slice_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %full_default), kwargs = {})
# %add_5 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %mul_3), kwargs = {})
triton_poi_fused_add_mul_ones_like_2 = async_compile.triton('triton_poi_fused_add_mul_ones_like_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_ones_like_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_ones_like_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
x1 = (xindex // 2)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + (4*x1)), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qh/cqhpnwewpwjyfgtmqk52q7nzhgxvz64em57i5zqre3aogvvgc44w.py
# Topologically Sorted Source Nodes: [h, mul_2, p_2, sigmoid, mul_5, p_3, p_4], Original ATen: [aten.ones_like, aten.mul, aten.add, aten.sigmoid]
# Source node to ATen node mapping:
# h => full_default
# mul_2 => mul_2
# mul_5 => mul_4
# p_2 => add_4
# p_3 => add_6
# p_4 => add_8
# sigmoid => sigmoid
# Graph fragment:
# %full_default : [num_users=6] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %full_default), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_2), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_5,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_6), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {})
# %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %view_5), kwargs = {})
triton_poi_fused_add_mul_ones_like_sigmoid_3 = async_compile.triton('triton_poi_fused_add_mul_ones_like_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_ones_like_sigmoid_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_ones_like_sigmoid_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x2), xmask)
tmp7 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp6 = tl.sigmoid(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tmp11 = tmp9 + tmp10
tl.store(in_out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/t3/ct325x4vh4pj6xfs7tunbgh3vtolyog55n4jmsc2wcj5mshznrby.py
# Topologically Sorted Source Nodes: [h, q_3, mul_8, p_5, mul_9, q_4, sigmoid_1, mul_11, q_5], Original ATen: [aten.ones_like, aten.add, aten.mul, aten.sigmoid]
# Source node to ATen node mapping:
# h => full_default
# mul_11 => mul_10
# mul_8 => mul_8
# mul_9 => mul_9
# p_5 => add_11
# q_3 => add_10
# q_4 => add_12
# q_5 => add_13
# sigmoid_1 => sigmoid_1
# Graph fragment:
# %full_default : [num_users=6] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_7, %add_5), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_9, %full_default), kwargs = {})
# %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %mul_8), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_10, %full_default), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %mul_9), kwargs = {})
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_11,), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %primals_11), kwargs = {})
# %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_10, %add_12), kwargs = {})
triton_poi_fused_add_mul_ones_like_sigmoid_4 = async_compile.triton('triton_poi_fused_add_mul_ones_like_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_ones_like_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_ones_like_sigmoid_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_out_ptr0 + (x2), xmask)
tmp9 = tl.load(in_ptr3 + (x2), xmask)
tmp11 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = tmp5 * tmp6
tmp10 = tmp8 + tmp9
tmp12 = tmp11 * tmp2
tmp13 = tmp10 + tmp12
tmp14 = tmp7 + tmp13
tl.store(out_ptr0 + (x2), tmp5, xmask)
tl.store(in_out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xg/cxghibwkfdbdt37itpfv7yhstbkc3icl466hgbvxh3urpks2tguv.py
# Topologically Sorted Source Nodes: [h, mul_8, p_5, p_6], Original ATen: [aten.ones_like, aten.mul, aten.add]
# Source node to ATen node mapping:
# h => full_default
# mul_8 => mul_8
# p_5 => add_11
# p_6 => add_15
# Graph fragment:
# %full_default : [num_users=6] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_9, %full_default), kwargs = {})
# %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %mul_8), kwargs = {})
# %add_15 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %view_9), kwargs = {})
triton_poi_fused_add_mul_ones_like_5 = async_compile.triton('triton_poi_fused_add_mul_ones_like_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_ones_like_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_ones_like_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/aw/caw4fa3fedeaj5jt4jem6dnfgpcadhfo7hyroeqxces62h6fa3oz.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_18, %add_19], -1), kwargs = {})
triton_poi_fused_cat_6 = async_compile.triton('triton_poi_fused_cat_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((2*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = 1.0
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 4, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((2*x1) + ((-2) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + ((2*x1) + ((-2) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = tl.load(in_ptr4 + ((-2) + x0), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tmp18 * tmp7
tmp20 = tmp17 + tmp19
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp12, tmp20, tmp21)
tmp23 = tl.where(tmp4, tmp11, tmp22)
tl.store(out_ptr0 + (x2), tmp23, xmask)
''', device_str='cuda')
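# Eager-mode equivalent of the cat kernel above (illustrative):
#   out = torch.cat([p + bp, p @ (S + S.t()) + q + bq], dim=-1)   # h == 1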
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 2), (2, 1))
assert_size_stride(primals_3, (2, 2), (2, 1))
assert_size_stride(primals_4, (2, ), (1, ))
assert_size_stride(primals_5, (2, ), (1, ))
assert_size_stride(primals_6, (2, ), (1, ))
assert_size_stride(primals_7, (2, 2), (2, 1))
assert_size_stride(primals_8, (2, 2), (2, 1))
assert_size_stride(primals_9, (2, ), (1, ))
assert_size_stride(primals_10, (2, ), (1, ))
assert_size_stride(primals_11, (2, ), (1, ))
assert_size_stride(primals_12, (2, 2), (2, 1))
assert_size_stride(primals_13, (2, 2), (2, 1))
assert_size_stride(primals_14, (2, ), (1, ))
assert_size_stride(primals_15, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((2, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_2, buf0, 2, 2, grid=grid(2, 2), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, matmul], Original ATen: [aten.add, aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 2), (4, 1), 2), buf0, out=buf1)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add_2], Original ATen: [aten.add]
triton_poi_fused_add_0.run(primals_3, buf2, 2, 2, grid=grid(2, 2), stream=stream0)
del primals_3
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [p_1], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf3, primals_1, 128, grid=grid(128), stream=stream0)
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (64, 2), (2, 1), 0), buf2, out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [h, q_1, mul_3, q_2], Original ATen: [aten.ones_like, aten.add, aten.mul]
triton_poi_fused_add_mul_ones_like_2.run(buf5, primals_1, primals_5, 128, grid=grid(128), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((2, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_7], Original ATen: [aten.add]
triton_poi_fused_add_0.run(primals_7, buf6, 2, 2, grid=grid(2, 2), stream=stream0)
del primals_7
buf7 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf5, (64, 2), (2, 1), 0), buf6, out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [h, mul_2, p_2, sigmoid, mul_5, p_3, p_4], Original ATen: [aten.ones_like, aten.mul, aten.add, aten.sigmoid]
triton_poi_fused_add_mul_ones_like_sigmoid_3.run(buf8, buf3, primals_4, buf5, primals_6, 128, grid=grid(128), stream=stream0)
del primals_4
buf9 = empty_strided_cuda((2, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_9], Original ATen: [aten.add]
triton_poi_fused_add_0.run(primals_8, buf9, 2, 2, grid=grid(2, 2), stream=stream0)
del primals_8
buf10 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf8, (64, 2), (2, 1), 0), buf9, out=buf10)
buf11 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
buf12 = reinterpret_tensor(buf10, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [h, q_3, mul_8, p_5, mul_9, q_4, sigmoid_1, mul_11, q_5], Original ATen: [aten.ones_like, aten.add, aten.mul, aten.sigmoid]
triton_poi_fused_add_mul_ones_like_sigmoid_4.run(buf12, buf8, primals_9, primals_11, buf5, primals_10, buf11, 128, grid=grid(128), stream=stream0)
del primals_10
buf13 = empty_strided_cuda((2, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_14], Original ATen: [aten.add]
triton_poi_fused_add_0.run(primals_12, buf13, 2, 2, grid=grid(2, 2), stream=stream0)
del primals_12
buf14 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf12, (64, 2), (2, 1), 0), buf13, out=buf14)
buf15 = empty_strided_cuda((2, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_16], Original ATen: [aten.add]
triton_poi_fused_add_0.run(primals_13, buf15, 2, 2, grid=grid(2, 2), stream=stream0)
del primals_13
buf16 = reinterpret_tensor(buf14, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf14 # reuse
# Topologically Sorted Source Nodes: [h, mul_8, p_5, p_6], Original ATen: [aten.ones_like, aten.mul, aten.add]
triton_poi_fused_add_mul_ones_like_5.run(buf16, buf8, primals_9, 128, grid=grid(128), stream=stream0)
del primals_9
buf17 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf16, (64, 2), (2, 1), 0), buf15, out=buf17)
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_6.run(buf16, primals_14, buf17, buf12, primals_15, buf18, 256, grid=grid(256), stream=stream0)
del buf17
del primals_14
del primals_15
return (buf18, primals_6, primals_11, buf5, buf11, reinterpret_tensor(buf16, (2, 64), (1, 2), 0), reinterpret_tensor(buf15, (2, 2), (1, 2), 0), reinterpret_tensor(buf12, (2, 64), (1, 2), 0), reinterpret_tensor(buf13, (2, 2), (1, 2), 0), reinterpret_tensor(buf8, (2, 64), (1, 2), 0), reinterpret_tensor(buf9, (2, 2), (1, 2), 0), reinterpret_tensor(buf6, (2, 2), (1, 2), 0), reinterpret_tensor(buf3, (2, 64), (1, 2), 0), reinterpret_tensor(buf2, (2, 2), (1, 2), 0), reinterpret_tensor(primals_1, (2, 64), (1, 4), 2), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((2, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((2, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((2, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import torch.nn as nn
class Module(torch.nn.Module):
"""Standard module format.
"""
def __init__(self):
super(Module, self).__init__()
self.activation = None
self.initializer = None
self.__device = None
self.__dtype = None
@property
def device(self):
return self.__device
@property
def dtype(self):
return self.__dtype
@device.setter
def device(self, d):
if d == 'cpu':
self.cpu()
elif d == 'gpu':
            self.cuda()
else:
raise ValueError
self.__device = d
@dtype.setter
def dtype(self, d):
        if d == 'float':
            self.float()
        elif d == 'double':
            self.double()
else:
raise ValueError
self.__dtype = d
@property
def Device(self):
if self.__device == 'cpu':
return torch.device('cpu')
elif self.__device == 'gpu':
return torch.device('cuda')
@property
def Dtype(self):
if self.__dtype == 'float':
return torch.float32
elif self.__dtype == 'double':
return torch.float64
@property
def act(self):
if self.activation == 'sigmoid':
return torch.sigmoid
elif self.activation == 'relu':
return torch.relu
elif self.activation == 'tanh':
return torch.tanh
elif self.activation == 'elu':
            return torch.nn.functional.elu
else:
raise NotImplementedError
@property
def Act(self):
if self.activation == 'sigmoid':
return torch.nn.Sigmoid()
elif self.activation == 'relu':
return torch.nn.ReLU()
elif self.activation == 'tanh':
return torch.nn.Tanh()
elif self.activation == 'elu':
return torch.nn.ELU()
else:
raise NotImplementedError
@property
def weight_init_(self):
if self.initializer == 'He normal':
return torch.nn.init.kaiming_normal_
elif self.initializer == 'He uniform':
return torch.nn.init.kaiming_uniform_
elif self.initializer == 'Glorot normal':
return torch.nn.init.xavier_normal_
elif self.initializer == 'Glorot uniform':
return torch.nn.init.xavier_uniform_
elif self.initializer == 'orthogonal':
return torch.nn.init.orthogonal_
elif self.initializer == 'default':
if self.activation == 'relu':
return torch.nn.init.kaiming_normal_
elif self.activation == 'tanh':
return torch.nn.init.orthogonal_
else:
return lambda x: None
else:
raise NotImplementedError
class StructureNN(Module):
"""Structure-oriented neural network used as a general map based on designing architecture.
"""
def __init__(self):
super(StructureNN, self).__init__()
def predict(self, x, returnnp=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=self.Dtype, device=self.Device)
return self(x).cpu().detach().numpy() if returnnp else self(x)
class LinearModule(Module):
"""Linear symplectic module.
"""
def __init__(self, dim, layers):
super(LinearModule, self).__init__()
self.dim = dim
self.layers = layers
self.params = self.__init_params()
def forward(self, pqh):
p, q, h = pqh
for i in range(self.layers):
S = self.params['S{}'.format(i + 1)]
if i % 2 == 0:
p = p + q @ (S + S.t()) * h
else:
q = p @ (S + S.t()) * h + q
return p + self.params['bp'] * h, q + self.params['bq'] * h
def __init_params(self):
"""Si is distributed N(0, 0.01), and b is set to zero.
"""
d = int(self.dim / 2)
params = nn.ParameterDict()
for i in range(self.layers):
params['S{}'.format(i + 1)] = nn.Parameter((torch.randn([d, d]) *
0.01).requires_grad_(True))
params['bp'] = nn.Parameter(torch.zeros([d]).requires_grad_(True))
params['bq'] = nn.Parameter(torch.zeros([d]).requires_grad_(True))
return params
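# Sanity-check sketch (ours, not part of the original repo): each shear above
# has Jacobian [[I, D], [0, I]] or [[I, 0], [D, I]] with symmetric
# D = h * (S + S.t()), and the bias add is a translation, so the whole module
# is symplectic: J.T @ Omega @ J == Omega. The helper below verifies this
# numerically; call it manually to run the check.
def _check_linear_module_symplectic(dim=4, layers=2, h=0.1):
    d = dim // 2
    mod = LinearModule(dim, layers)
    def f(z):
        p, q = mod([z[:d], z[d:], torch.full((1,), h)])
        return torch.cat([p, q], dim=-1)
    J = torch.autograd.functional.jacobian(f, torch.randn(dim))
    Omega = torch.cat([
        torch.cat([torch.zeros(d, d), torch.eye(d)], dim=1),
        torch.cat([-torch.eye(d), torch.zeros(d, d)], dim=1),
    ], dim=0)
    assert torch.allclose(J.t() @ Omega @ J, Omega, atol=1e-5)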
class ActivationModule(Module):
"""Activation symplectic module.
"""
def __init__(self, dim, activation, mode):
super(ActivationModule, self).__init__()
self.dim = dim
self.activation = activation
self.mode = mode
self.params = self.__init_params()
def forward(self, pqh):
p, q, h = pqh
if self.mode == 'up':
return p + self.act(q) * self.params['a'] * h, q
elif self.mode == 'low':
return p, self.act(p) * self.params['a'] * h + q
else:
raise ValueError
def __init_params(self):
d = int(self.dim / 2)
params = nn.ParameterDict()
params['a'] = nn.Parameter((torch.randn([d]) * 0.01).requires_grad_
(True))
return params
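# In Hamiltonian terms these are gradient modules: with V(q) = sum(a * Act(q))
# where Act' is self.act, 'up' maps (p, q) -> (p + grad_q V * h, q) and 'low'
# maps (p, q) -> (p, q + grad_p V * h); both Jacobians are unit triangular with
# a symmetric (diagonal) off-diagonal block, hence symplectic.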
class SympNet(StructureNN):
def __init__(self):
super(SympNet, self).__init__()
self.dim = None
def predict(self, xh, steps=1, keepinitx=False, returnnp=False):
dim = xh.size(-1)
size = len(xh.size())
if dim == self.dim:
pred = [xh]
for _ in range(steps):
pred.append(self(pred[-1]))
else:
x0, h = xh[..., :-1], xh[..., -1:]
pred = [x0]
for _ in range(steps):
pred.append(self(torch.cat([pred[-1], h], dim=-1)))
if keepinitx:
steps = steps + 1
else:
pred = pred[1:]
res = torch.cat(pred, dim=-1).view([-1, steps, self.dim][2 - size:])
return res.cpu().detach().numpy() if returnnp else res
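# Usage sketch for predict (illustrative; names are ours):
#   net = LASympNet(dim=4)                 # concrete SympNet subclass below
#   z0 = torch.randn(16, 4)                # [num, dim] -> unit step size h = 1
#   traj = net.predict(z0, steps=10, keepinitx=True, returnnp=True)
#   # traj.shape == (16, 11, 4): the initial state plus 10 predicted steps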
class LASympNet(SympNet):
"""LA-SympNet.
    Input: [num, dim] or [num, dim + 1] (the extra last column, if present, is the step size h; otherwise h = 1)
Output: [num, dim]
"""
def __init__(self, dim, layers=3, sublayers=2, activation='sigmoid'):
super(LASympNet, self).__init__()
self.dim = dim
self.layers = layers
self.sublayers = sublayers
self.activation = activation
self.modus = self.__init_modules()
def forward(self, pqh):
d = int(self.dim / 2)
if pqh.size(-1) == self.dim + 1:
p, q, h = pqh[..., :d], pqh[..., d:-1], pqh[..., -1:]
elif pqh.size(-1) == self.dim:
p, q, h = pqh[..., :d], pqh[..., d:], torch.ones_like(pqh[..., -1:]
)
else:
raise ValueError
for i in range(self.layers - 1):
LinM = self.modus['LinM{}'.format(i + 1)]
ActM = self.modus['ActM{}'.format(i + 1)]
p, q = ActM([*LinM([p, q, h]), h])
return torch.cat(self.modus['LinMout']([p, q, h]), dim=-1)
def __init_modules(self):
modules = nn.ModuleDict()
for i in range(self.layers - 1):
modules['LinM{}'.format(i + 1)] = LinearModule(self.dim, self.
sublayers)
mode = 'up' if i % 2 == 0 else 'low'
modules['ActM{}'.format(i + 1)] = ActivationModule(self.dim,
self.activation, mode)
modules['LinMout'] = LinearModule(self.dim, self.sublayers)
return modules
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
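# Minimal smoke test mirroring get_init_inputs/get_inputs above (illustrative):
if __name__ == '__main__':
    net = LASympNet(*get_init_inputs()[0], **get_init_inputs()[1])
    out = net(*get_inputs())
    assert out.shape == torch.Size([4, 4, 4, 4])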
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
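# Builds the symmetrized weight S + S.t() used by LinearModule (2x2 tile).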
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 2
xnumel = 2
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + 2 * y0), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (y0 + 2 * x1), xmask & ymask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x1 + 2 * y0), tmp2, xmask & ymask)
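# In-place: adds the p slice of the input to the q @ (S + S.t()) matmul
# result, i.e. the even LinearModule step p = p + q @ (S + S.t()) with h == 1.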
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
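# Odd LinearModule step plus bias: q = p @ (S + S.t()) + q + bq * h, with
# h == 1 coming from the ones_like in the eager forward.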
@triton.jit
def triton_poi_fused_add_mul_ones_like_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
x1 = xindex // 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
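# Fuses the LinM1 bias, the 'up' activation module, and the next shear:
# p = (p + bp) + sigmoid(q) * a + q @ (S + S.t()).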
@triton.jit
def triton_poi_fused_add_mul_ones_like_sigmoid_3(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x2, xmask)
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp6 = tl.sigmoid(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tmp11 = tmp9 + tmp10
tl.store(in_out_ptr0 + x2, tmp11, xmask)
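# Fuses the LinM2 tail and the 'low' activation module:
# q = sigmoid(p + bp) * a + (p @ (S + S.t()) + q + bq); sigmoid(p + bp) is
# also written to out_ptr0 so it can be reused (e.g. by autograd).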
@triton.jit
def triton_poi_fused_add_mul_ones_like_sigmoid_4(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_out_ptr0 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x2, xmask)
tmp11 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = tmp5 * tmp6
tmp10 = tmp8 + tmp9
tmp12 = tmp11 * tmp2
tmp13 = tmp10 + tmp12
tmp14 = tmp7 + tmp13
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(in_out_ptr0 + x2, tmp14, xmask)
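# Recomputes p + bp and adds the LinMout shear: p = (p + bp) + q @ (S + S.t()).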
@triton.jit
def triton_poi_fused_add_mul_ones_like_5(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
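# Final concat: out = cat([p + bp, p @ (S + S.t()) + q + bq], dim=-1), h == 1.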
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = 1.0
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp15 = tl.load(in_ptr2 + (2 * x1 + (-2 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + (2 * x1 + (-2 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = tl.load(in_ptr4 + (-2 + x0), tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tmp18 * tmp7
tmp20 = tmp17 + tmp19
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp12, tmp20, tmp21)
tmp23 = tl.where(tmp4, tmp11, tmp22)
tl.store(out_ptr0 + x2, tmp23, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 2), (2, 1))
assert_size_stride(primals_3, (2, 2), (2, 1))
assert_size_stride(primals_4, (2,), (1,))
assert_size_stride(primals_5, (2,), (1,))
assert_size_stride(primals_6, (2,), (1,))
assert_size_stride(primals_7, (2, 2), (2, 1))
assert_size_stride(primals_8, (2, 2), (2, 1))
assert_size_stride(primals_9, (2,), (1,))
assert_size_stride(primals_10, (2,), (1,))
assert_size_stride(primals_11, (2,), (1,))
assert_size_stride(primals_12, (2, 2), (2, 1))
assert_size_stride(primals_13, (2, 2), (2, 1))
assert_size_stride(primals_14, (2,), (1,))
assert_size_stride(primals_15, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((2, 2), (2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(2, 2)](primals_2, buf0, 2, 2, XBLOCK=2,
YBLOCK=2, num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 2), (4, 1), 2),
buf0, out=buf1)
buf2 = buf0
del buf0
triton_poi_fused_add_0[grid(2, 2)](primals_3, buf2, 2, 2, XBLOCK=2,
YBLOCK=2, num_warps=1, num_stages=1)
del primals_3
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf1
triton_poi_fused_add_1[grid(128)](buf3, primals_1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 2), (2, 1), 0),
buf2, out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf4
triton_poi_fused_add_mul_ones_like_2[grid(128)](buf5, primals_1,
primals_5, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((2, 2), (2, 1), torch.float32)
triton_poi_fused_add_0[grid(2, 2)](primals_7, buf6, 2, 2, XBLOCK=2,
YBLOCK=2, num_warps=1, num_stages=1)
del primals_7
buf7 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 2), (2, 1), 0),
buf6, out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf7
triton_poi_fused_add_mul_ones_like_sigmoid_3[grid(128)](buf8, buf3,
primals_4, buf5, primals_6, 128, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_4
buf9 = empty_strided_cuda((2, 2), (2, 1), torch.float32)
triton_poi_fused_add_0[grid(2, 2)](primals_8, buf9, 2, 2, XBLOCK=2,
YBLOCK=2, num_warps=1, num_stages=1)
del primals_8
buf10 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (64, 2), (2, 1), 0),
buf9, out=buf10)
buf11 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
buf12 = reinterpret_tensor(buf10, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf10
triton_poi_fused_add_mul_ones_like_sigmoid_4[grid(128)](buf12, buf8,
primals_9, primals_11, buf5, primals_10, buf11, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_10
buf13 = empty_strided_cuda((2, 2), (2, 1), torch.float32)
triton_poi_fused_add_0[grid(2, 2)](primals_12, buf13, 2, 2, XBLOCK=
2, YBLOCK=2, num_warps=1, num_stages=1)
del primals_12
buf14 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf12, (64, 2), (2, 1), 0),
buf13, out=buf14)
buf15 = empty_strided_cuda((2, 2), (2, 1), torch.float32)
triton_poi_fused_add_0[grid(2, 2)](primals_13, buf15, 2, 2, XBLOCK=
2, YBLOCK=2, num_warps=1, num_stages=1)
del primals_13
buf16 = reinterpret_tensor(buf14, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf14
triton_poi_fused_add_mul_ones_like_5[grid(128)](buf16, buf8,
primals_9, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf17 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf16, (64, 2), (2, 1), 0),
buf15, out=buf17)
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_cat_6[grid(256)](buf16, primals_14, buf17, buf12,
primals_15, buf18, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf17
del primals_14
del primals_15
return buf18, primals_6, primals_11, buf5, buf11, reinterpret_tensor(buf16,
(2, 64), (1, 2), 0), reinterpret_tensor(buf15, (2, 2), (1, 2), 0
), reinterpret_tensor(buf12, (2, 64), (1, 2), 0), reinterpret_tensor(
buf13, (2, 2), (1, 2), 0), reinterpret_tensor(buf8, (2, 64), (1, 2), 0
), reinterpret_tensor(buf9, (2, 2), (1, 2), 0), reinterpret_tensor(buf6
, (2, 2), (1, 2), 0), reinterpret_tensor(buf3, (2, 64), (1, 2), 0
), reinterpret_tensor(buf2, (2, 2), (1, 2), 0), reinterpret_tensor(
primals_1, (2, 64), (1, 4), 2)
class Module(torch.nn.Module):
"""Standard module format.
"""
def __init__(self):
super(Module, self).__init__()
self.activation = None
self.initializer = None
self.__device = None
self.__dtype = None
@property
def device(self):
return self.__device
@property
def dtype(self):
return self.__dtype
@device.setter
def device(self, d):
if d == 'cpu':
self.cpu()
elif d == 'gpu':
            self.cuda()
else:
raise ValueError
self.__device = d
@dtype.setter
def dtype(self, d):
        if d == 'float':
            self.float()
        elif d == 'double':
            self.double()
else:
raise ValueError
self.__dtype = d
@property
def Device(self):
if self.__device == 'cpu':
return torch.device('cpu')
elif self.__device == 'gpu':
return torch.device('cuda')
@property
def Dtype(self):
if self.__dtype == 'float':
return torch.float32
elif self.__dtype == 'double':
return torch.float64
@property
def act(self):
if self.activation == 'sigmoid':
return torch.sigmoid
elif self.activation == 'relu':
return torch.relu
elif self.activation == 'tanh':
return torch.tanh
elif self.activation == 'elu':
            return torch.nn.functional.elu
else:
raise NotImplementedError
@property
def Act(self):
if self.activation == 'sigmoid':
return torch.nn.Sigmoid()
elif self.activation == 'relu':
return torch.nn.ReLU()
elif self.activation == 'tanh':
return torch.nn.Tanh()
elif self.activation == 'elu':
return torch.nn.ELU()
else:
raise NotImplementedError
@property
def weight_init_(self):
if self.initializer == 'He normal':
return torch.nn.init.kaiming_normal_
elif self.initializer == 'He uniform':
return torch.nn.init.kaiming_uniform_
elif self.initializer == 'Glorot normal':
return torch.nn.init.xavier_normal_
elif self.initializer == 'Glorot uniform':
return torch.nn.init.xavier_uniform_
elif self.initializer == 'orthogonal':
return torch.nn.init.orthogonal_
elif self.initializer == 'default':
if self.activation == 'relu':
return torch.nn.init.kaiming_normal_
elif self.activation == 'tanh':
return torch.nn.init.orthogonal_
else:
return lambda x: None
else:
raise NotImplementedError
class StructureNN(Module):
"""Structure-oriented neural network used as a general map based on designing architecture.
"""
def __init__(self):
super(StructureNN, self).__init__()
def predict(self, x, returnnp=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=self.Dtype, device=self.Device)
return self(x).cpu().detach().numpy() if returnnp else self(x)
class LinearModule(Module):
"""Linear symplectic module.
"""
def __init__(self, dim, layers):
super(LinearModule, self).__init__()
self.dim = dim
self.layers = layers
self.params = self.__init_params()
def forward(self, pqh):
p, q, h = pqh
for i in range(self.layers):
S = self.params['S{}'.format(i + 1)]
if i % 2 == 0:
p = p + q @ (S + S.t()) * h
else:
q = p @ (S + S.t()) * h + q
return p + self.params['bp'] * h, q + self.params['bq'] * h
def __init_params(self):
"""Si is distributed N(0, 0.01), and b is set to zero.
"""
d = int(self.dim / 2)
params = nn.ParameterDict()
for i in range(self.layers):
params['S{}'.format(i + 1)] = nn.Parameter((torch.randn([d, d]) *
0.01).requires_grad_(True))
params['bp'] = nn.Parameter(torch.zeros([d]).requires_grad_(True))
params['bq'] = nn.Parameter(torch.zeros([d]).requires_grad_(True))
return params
class ActivationModule(Module):
"""Activation symplectic module.
"""
def __init__(self, dim, activation, mode):
super(ActivationModule, self).__init__()
self.dim = dim
self.activation = activation
self.mode = mode
self.params = self.__init_params()
def forward(self, pqh):
p, q, h = pqh
if self.mode == 'up':
return p + self.act(q) * self.params['a'] * h, q
elif self.mode == 'low':
return p, self.act(p) * self.params['a'] * h + q
else:
raise ValueError
def __init_params(self):
d = int(self.dim / 2)
params = nn.ParameterDict()
params['a'] = nn.Parameter((torch.randn([d]) * 0.01).requires_grad_
(True))
return params
class SympNet(StructureNN):
def __init__(self):
super(SympNet, self).__init__()
self.dim = None
def predict(self, xh, steps=1, keepinitx=False, returnnp=False):
dim = xh.size(-1)
size = len(xh.size())
if dim == self.dim:
pred = [xh]
for _ in range(steps):
pred.append(self(pred[-1]))
else:
x0, h = xh[..., :-1], xh[..., -1:]
pred = [x0]
for _ in range(steps):
pred.append(self(torch.cat([pred[-1], h], dim=-1)))
if keepinitx:
steps = steps + 1
else:
pred = pred[1:]
res = torch.cat(pred, dim=-1).view([-1, steps, self.dim][2 - size:])
return res.cpu().detach().numpy() if returnnp else res
class LASympNetNew(SympNet):
"""LA-SympNet.
    Input: [num, dim] or [num, dim + 1] (the extra last column, if present, is the step size h; otherwise h = 1)
Output: [num, dim]
"""
def __init__(self, dim, layers=3, sublayers=2, activation='sigmoid'):
super(LASympNetNew, self).__init__()
self.dim = dim
self.layers = layers
self.sublayers = sublayers
self.activation = activation
self.modus = self.__init_modules()
def __init_modules(self):
modules = nn.ModuleDict()
for i in range(self.layers - 1):
modules['LinM{}'.format(i + 1)] = LinearModule(self.dim, self.
sublayers)
mode = 'up' if i % 2 == 0 else 'low'
modules['ActM{}'.format(i + 1)] = ActivationModule(self.dim,
self.activation, mode)
modules['LinMout'] = LinearModule(self.dim, self.sublayers)
return modules
def forward(self, input_0):
primals_2 = self.modus.LinM1.params.S1
primals_3 = self.modus.LinM1.params.S2
primals_4 = self.modus.LinM1.params.bp
primals_5 = self.modus.LinM1.params.bq
primals_6 = self.modus.ActM1.params.a
primals_7 = self.modus.LinM2.params.S1
primals_8 = self.modus.LinM2.params.S2
primals_9 = self.modus.LinM2.params.bp
primals_10 = self.modus.LinM2.params.bq
primals_11 = self.modus.ActM2.params.a
primals_12 = self.modus.LinMout.params.S1
primals_13 = self.modus.LinMout.params.S2
primals_14 = self.modus.LinMout.params.bp
primals_15 = self.modus.LinMout.params.bq
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
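# Equivalence sketch between the eager LASympNet above and this fused module
# (illustrative; assumes a CUDA device and identical parameter layout):
#   eager = LASympNet(dim=4).cuda()
#   fused = LASympNetNew(dim=4).cuda()
#   fused.load_state_dict(eager.state_dict())
#   x = torch.rand([4, 4, 4, 4], device='cuda')
#   torch.testing.assert_close(eager(x), fused(x))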
| shushu-qin/deeponet | LASympNet | false | 16,461 | [
"Apache-2.0"
] | 140 | 5bbe066279bba055ad80e04c364140363c87634a | https://github.com/shushu-qin/deeponet/tree/5bbe066279bba055ad80e04c364140363c87634a |
ClassAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ey/cey4yc74rqvkmtcxtse2vt3dw6pfdi3zwtwezx7cdzkykhz4kzp7.py
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# q_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4i/c4ifig7r6ck2y725hll5brbiwptsykocbfmp75m5zsqzc4zwa635.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (8*x2) + (32*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hg/chg3iq6bscxmmxv5f7tuzgwycb4mgrimwfhv2nauw5rj4tt5cmv2.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_18, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_18, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zu/czuvep3dmpmqmhiiliwubh4ghdt2qr27va67sszkua7trziinwov.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
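# Editorial note: the two kernels above split a numerically stable softmax
# across launches -- triton_poi_fused__softmax_2 computes exp(x - rowmax) and
# triton_poi_fused__softmax_3 divides each element by its row sum.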
# kernel path: runs/run_shard_0/inductor_cache/ra/crawzlrlefqew6hbeebyicdqrvbkup3ok24f4lbft23nndfraukx.py
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul_1 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_4,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + (8*x2) + (32*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 8), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
buf4 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 1, 1), (1, 0, 0), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 0, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf1, buf7, 16, 4, grid=grid(16, 4), stream=stream0)
del buf1
buf8 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cls_embed_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_5
return (reinterpret_tensor(buf9, (4, 1, 4), (4, 4, 1), 0), primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf2, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch import nn
class ClassAttention(nn.Module):
def __init__(self, dim, num_heads):
super().__init__()
self.num_heads = num_heads
self.scale = (dim // num_heads) ** -0.5
self.q = nn.Linear(dim, dim, bias=False)
self.kv = nn.Linear(dim, dim * 2, bias=False)
self.proj = nn.Linear(dim, dim)
def forward(self, x: 'Tensor') ->Tensor:
B, N, C = x.shape
        q = self.q(x[:, :1, :]).reshape(B, self.num_heads, 1, C // self.num_heads)
q *= self.scale
        kv = self.kv(x).reshape(B, N, 2, self.num_heads, C // self.num_heads)
        kv = kv.permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
cls_embed = (attn @ v).transpose(1, 2).reshape(B, 1, C)
cls_embed = self.proj(cls_embed)
return cls_embed
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
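# Added shape-check sketch (editorial, not part of the original entry):
# exercises ClassAttention with the sample inputs above; the expected shape
# follows the reshape/permute logic in forward().
if __name__ == '__main__':
    attn = ClassAttention(**get_init_inputs()[1])
    out = attn(*get_inputs())
    assert out.shape == (4, 1, 4)  # one class-token embedding per sample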
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
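# Editorial note: the multiplier above is 1.0 because
# scale = (dim // num_heads) ** -0.5 = (4 // 4) ** -0.5 for the sample config,
# so this kernel is effectively a no-op retained for graph fidelity.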
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 8 * x2 + 32 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 8 * x2 + 32 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 8), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf1, buf3, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 1, 1), (1, 0, 0),
0), reinterpret_tensor(buf3, (16, 1, 4), (4, 0, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 64, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf4
triton_poi_fused__softmax_3[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf5
triton_poi_fused_clone_4[grid(16, 4)](buf1, buf7, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf1
buf8 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 1, 4), (4, 4, 1),
0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4,
1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf9)
del primals_5
return reinterpret_tensor(buf9, (4, 1, 4), (4, 4, 1), 0
), primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0
), primals_4, reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf2, (16, 1, 1), (1, 1, 1), 0
), reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 4), 0)
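# Editorial note: call() returns the projected class embedding first; the
# trailing tensors are activations Inductor saves for the backward pass.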
class ClassAttentionNew(nn.Module):
def __init__(self, dim, num_heads):
super().__init__()
self.num_heads = num_heads
self.scale = (dim // num_heads) ** -0.5
self.q = nn.Linear(dim, dim, bias=False)
self.kv = nn.Linear(dim, dim * 2, bias=False)
self.proj = nn.Linear(dim, dim)
def forward(self, input_0):
primals_2 = self.q.weight
primals_3 = self.kv.weight
primals_4 = self.proj.weight
primals_5 = self.proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| sithu31296/image_classification | ClassAttention | false | 16,462 | ["MIT"] | 57 | 6b8cbce96100225621cee3166a73e852ba216cc3 | https://github.com/sithu31296/image_classification/tree/6b8cbce96100225621cee3166a73e852ba216cc3 |
InstanceNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/2t/c2tlmlnddgofeoodf622jrs5dgndzay3j3z6iezkat4cpubm2kuk.py
# Topologically Sorted Source Nodes: [mean, x, tmp, mean_1, add, tmp_1, mul_1], Original ATen: [aten.mean, aten.sub, aten.mul, aten.add, aten.rsqrt]
# Source node to ATen node mapping:
# add => add
# mean => mean
# mean_1 => mean_1
# mul_1 => mul_1
# tmp => mul
# tmp_1 => rsqrt
# x => sub
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [2, 3], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [2, 3], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-08), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
triton_per_fused_add_mean_mul_rsqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_rsqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_rsqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_rsqrt_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tmp7 = tmp0 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp12 / tmp5
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.rsqrt(tmp15)
tmp17 = tmp7 * tmp16
tl.store(out_ptr2 + (r1 + (16*x0)), tmp17, xmask)
''', device_str='cuda')
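# Editorial note: the single persistent-reduction kernel above fuses the
# per-(N, C) mean, the biased variance, rsqrt(var + eps), and the final
# normalization over the 16 spatial elements of each 4x4 feature map.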
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, x, tmp, mean_1, add, tmp_1, mul_1], Original ATen: [aten.mean, aten.sub, aten.mul, aten.add, aten.rsqrt]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_rsqrt_sub_0.run(arg0_1, buf2, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
class InstanceNorm(nn.Module):
def __init__(self, epsilon=1e-08):
super(InstanceNorm, self).__init__()
self.epsilon = epsilon
def forward(self, x):
x = x - torch.mean(x, (2, 3), True)
tmp = torch.mul(x, x)
tmp = torch.rsqrt(torch.mean(tmp, (2, 3), True) + self.epsilon)
return x * tmp
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
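# Added equivalence check (editorial, not part of the original entry): the
# hand-rolled normalization should match F.instance_norm with no affine
# parameters and the same epsilon, since both compute
# (x - mean) / sqrt(var + eps) with a biased per-instance variance.
if __name__ == '__main__':
    import torch.nn.functional as F
    x = get_inputs()[0]
    ref = F.instance_norm(x, eps=1e-08)
    assert torch.allclose(InstanceNorm()(x), ref, atol=1e-05)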
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_rsqrt_sub_0(in_ptr0, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tmp7 = tmp0 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp12 / tmp5
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.rsqrt(tmp15)
tmp17 = tmp7 * tmp16
tl.store(out_ptr2 + (r1 + 16 * x0), tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mean_mul_rsqrt_sub_0[grid(16)](arg0_1, buf2,
16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class InstanceNormNew(nn.Module):
def __init__(self, epsilon=1e-08):
super(InstanceNormNew, self).__init__()
self.epsilon = epsilon
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| siyuhuang/PoseStylizer | InstanceNorm | false | 16,463 | ["BSD-3-Clause"] | 75 | d1d832781ddfd3efde24bf32b36a4074fafebcc1 | https://github.com/siyuhuang/PoseStylizer/tree/d1d832781ddfd3efde24bf32b36a4074fafebcc1 |